-rw-r--r--  Makefile  6
-rw-r--r--  arch/Kconfig  13
-rw-r--r--  arch/x86/Kconfig  3
-rw-r--r--  arch/x86/include/asm/ftrace.h  56
-rw-r--r--  arch/x86/include/asm/kprobes.h  1
-rw-r--r--  arch/x86/include/asm/kvm.h  16
-rw-r--r--  arch/x86/include/asm/kvm_host.h  16
-rw-r--r--  arch/x86/include/asm/perf_event.h  2
-rw-r--r--  arch/x86/include/asm/perf_regs.h  33
-rw-r--r--  arch/x86/include/asm/processor.h  2
-rw-r--r--  arch/x86/include/asm/svm.h  205
-rw-r--r--  arch/x86/include/asm/uprobes.h  3
-rw-r--r--  arch/x86/include/asm/vmx.h  127
-rw-r--r--  arch/x86/kernel/Makefile  2
-rw-r--r--  arch/x86/kernel/cpu/perf_event_intel_uncore.c  28
-rw-r--r--  arch/x86/kernel/cpu/perf_event_intel_uncore.h  6
-rw-r--r--  arch/x86/kernel/entry_32.S  74
-rw-r--r--  arch/x86/kernel/entry_64.S  130
-rw-r--r--  arch/x86/kernel/ftrace.c  73
-rw-r--r--  arch/x86/kernel/kprobes.c  67
-rw-r--r--  arch/x86/kernel/perf_regs.c  105
-rw-r--r--  arch/x86/kernel/step.c  53
-rw-r--r--  arch/x86/kernel/uprobes.c  52
-rw-r--r--  arch/x86/kernel/x8664_ksyms_64.c  6
-rw-r--r--  arch/x86/kvm/trace.h  89
-rw-r--r--  drivers/oprofile/cpu_buffer.c  11
-rw-r--r--  include/linux/ftrace.h  158
-rw-r--r--  include/linux/kprobes.h  27
-rw-r--r--  include/linux/perf_event.h  60
-rw-r--r--  include/linux/perf_regs.h  25
-rw-r--r--  include/linux/sched.h  3
-rw-r--r--  include/linux/uprobes.h  15
-rw-r--r--  kernel/Makefile  2
-rw-r--r--  kernel/events/callchain.c  38
-rw-r--r--  kernel/events/core.c  214
-rw-r--r--  kernel/events/internal.h  82
-rw-r--r--  kernel/events/ring_buffer.c  10
-rw-r--r--  kernel/events/uprobes.c  248
-rw-r--r--  kernel/fork.c  6
-rw-r--r--  kernel/kprobes.c  247
-rw-r--r--  kernel/trace/Kconfig  10
-rw-r--r--  kernel/trace/Makefile  8
-rw-r--r--  kernel/trace/ftrace.c  322
-rw-r--r--  kernel/trace/ring_buffer.c  4
-rw-r--r--  kernel/trace/trace.c  12
-rw-r--r--  kernel/trace/trace.h  3
-rw-r--r--  kernel/trace/trace_event_perf.c  3
-rw-r--r--  kernel/trace/trace_events.c  116
-rw-r--r--  kernel/trace/trace_events_filter.c  2
-rw-r--r--  kernel/trace/trace_functions.c  14
-rw-r--r--  kernel/trace/trace_functions_graph.c  5
-rw-r--r--  kernel/trace/trace_irqsoff.c  5
-rw-r--r--  kernel/trace/trace_sched_wakeup.c  5
-rw-r--r--  kernel/trace/trace_selftest.c  304
-rw-r--r--  kernel/trace/trace_stack.c  4
-rw-r--r--  kernel/trace/trace_syscalls.c  2
-rw-r--r--  scripts/recordmcount.h  4
-rw-r--r--  tools/lib/traceevent/Makefile  2
-rw-r--r--  tools/lib/traceevent/event-parse.c  754
-rw-r--r--  tools/lib/traceevent/event-parse.h  46
-rw-r--r--  tools/lib/traceevent/event-utils.h  6
-rw-r--r--  tools/perf/.gitignore  2
-rw-r--r--  tools/perf/Documentation/Makefile  6
-rw-r--r--  tools/perf/Documentation/jit-interface.txt  15
-rw-r--r--  tools/perf/Documentation/perf-annotate.txt  3
-rw-r--r--  tools/perf/Documentation/perf-diff.txt  3
-rw-r--r--  tools/perf/Documentation/perf-kvm.txt  30
-rw-r--r--  tools/perf/Documentation/perf-list.txt  48
-rw-r--r--  tools/perf/Documentation/perf-report.txt  3
-rw-r--r--  tools/perf/Documentation/perf-script-perl.txt  4
-rw-r--r--  tools/perf/Documentation/perf-script-python.txt  10
-rw-r--r--  tools/perf/Documentation/perf-trace.txt  53
-rw-r--r--  tools/perf/MANIFEST  4
-rw-r--r--  tools/perf/Makefile  173
-rw-r--r--  tools/perf/arch/x86/Makefile  3
-rw-r--r--  tools/perf/arch/x86/include/perf_regs.h  80
-rw-r--r--  tools/perf/arch/x86/util/unwind.c  111
-rw-r--r--  tools/perf/bash_completion  26
-rw-r--r--  tools/perf/bench/bench.h  3
-rw-r--r--  tools/perf/bench/mem-memcpy.c  2
-rw-r--r--  tools/perf/bench/mem-memset.c  2
-rw-r--r--  tools/perf/bench/sched-messaging.c  2
-rw-r--r--  tools/perf/bench/sched-pipe.c  10
-rw-r--r--  tools/perf/builtin-annotate.c  4
-rw-r--r--  tools/perf/builtin-bench.c  2
-rw-r--r--  tools/perf/builtin-buildid-cache.c  10
-rw-r--r--  tools/perf/builtin-buildid-list.c  7
-rw-r--r--  tools/perf/builtin-diff.c  96
-rw-r--r--  tools/perf/builtin-evlist.c  2
-rw-r--r--  tools/perf/builtin-help.c  50
-rw-r--r--  tools/perf/builtin-inject.c  29
-rw-r--r--  tools/perf/builtin-kmem.c  234
-rw-r--r--  tools/perf/builtin-kvm.c  838
-rw-r--r--  tools/perf/builtin-list.c  16
-rw-r--r--  tools/perf/builtin-lock.c  414
-rw-r--r--  tools/perf/builtin-probe.c  24
-rw-r--r--  tools/perf/builtin-record.c  305
-rw-r--r--  tools/perf/builtin-report.c  47
-rw-r--r--  tools/perf/builtin-sched.c  1522
-rw-r--r--  tools/perf/builtin-script.c  229
-rw-r--r--  tools/perf/builtin-stat.c  136
-rw-r--r--  tools/perf/builtin-test.c  353
-rw-r--r--  tools/perf/builtin-timechart.c  70
-rw-r--r--  tools/perf/builtin-top.c  33
-rw-r--r--  tools/perf/builtin-trace.c  310
-rw-r--r--  tools/perf/builtin.h  2
-rw-r--r--  tools/perf/command-list.txt  3
-rw-r--r--  tools/perf/config/feature-tests.mak  50
-rw-r--r--  tools/perf/perf-archive.sh  6
-rw-r--r--  tools/perf/perf.c  75
-rw-r--r--  tools/perf/perf.h  9
-rwxr-xr-x  tools/perf/scripts/python/Perf-Trace-Util/lib/Perf/Trace/EventClass.py  94
-rw-r--r--  tools/perf/scripts/python/bin/event_analyzing_sample-record  8
-rw-r--r--  tools/perf/scripts/python/bin/event_analyzing_sample-report  3
-rw-r--r--  tools/perf/scripts/python/event_analyzing_sample.py  189
-rw-r--r--  tools/perf/ui/browser.c  7
-rw-r--r--  tools/perf/ui/browsers/annotate.c  6
-rw-r--r--  tools/perf/ui/browsers/hists.c  133
-rw-r--r--  tools/perf/ui/gtk/browser.c  111
-rw-r--r--  tools/perf/ui/gtk/gtk.h  3
-rw-r--r--  tools/perf/ui/gtk/helpline.c  56
-rw-r--r--  tools/perf/ui/gtk/setup.c  6
-rw-r--r--  tools/perf/ui/gtk/util.c  9
-rw-r--r--  tools/perf/ui/helpline.c  56
-rw-r--r--  tools/perf/ui/helpline.h  33
-rw-r--r--  tools/perf/ui/hist.c  390
-rw-r--r--  tools/perf/ui/setup.c  10
-rw-r--r--  tools/perf/ui/stdio/hist.c  498
-rw-r--r--  tools/perf/ui/tui/helpline.c  57
-rw-r--r--  tools/perf/ui/tui/setup.c  10
-rw-r--r--  tools/perf/util/alias.c  3
-rw-r--r--  tools/perf/util/annotate.c  19
-rw-r--r--  tools/perf/util/annotate.h  15
-rw-r--r--  tools/perf/util/build-id.c  11
-rw-r--r--  tools/perf/util/cache.h  6
-rw-r--r--  tools/perf/util/callchain.c  6
-rw-r--r--  tools/perf/util/cgroup.c  4
-rw-r--r--  tools/perf/util/config.c  6
-rw-r--r--  tools/perf/util/cpumap.c  22
-rw-r--r--  tools/perf/util/cpumap.h  13
-rw-r--r--  tools/perf/util/debug.c  4
-rw-r--r--  tools/perf/util/debug.h  17
-rw-r--r--  tools/perf/util/dso-test-data.c  2
-rw-r--r--  tools/perf/util/dwarf-aux.c  2
-rw-r--r--  tools/perf/util/event.c  71
-rw-r--r--  tools/perf/util/event.h  14
-rw-r--r--  tools/perf/util/evlist.c  143
-rw-r--r--  tools/perf/util/evlist.h  35
-rw-r--r--  tools/perf/util/evsel.c  276
-rw-r--r--  tools/perf/util/evsel.h  63
-rwxr-xr-x  tools/perf/util/generate-cmdlist.sh  15
-rw-r--r--  tools/perf/util/header.c  1063
-rw-r--r--  tools/perf/util/header.h  29
-rw-r--r--  tools/perf/util/help.c  4
-rw-r--r--  tools/perf/util/hist.c  721
-rw-r--r--  tools/perf/util/hist.h  75
-rw-r--r--  tools/perf/util/include/linux/bitops.h  4
-rw-r--r--  tools/perf/util/include/linux/compiler.h  9
-rw-r--r--  tools/perf/util/include/linux/kernel.h  17
-rw-r--r--  tools/perf/util/include/linux/magic.h  12
-rw-r--r--  tools/perf/util/include/linux/rbtree.h  1
-rw-r--r--  tools/perf/util/include/linux/string.h  2
-rw-r--r--  tools/perf/util/include/linux/types.h  8
-rw-r--r--  tools/perf/util/intlist.c  8
-rw-r--r--  tools/perf/util/map.c  47
-rw-r--r--  tools/perf/util/map.h  9
-rw-r--r--  tools/perf/util/parse-events-test.c  424
-rw-r--r--  tools/perf/util/parse-events.c  254
-rw-r--r--  tools/perf/util/parse-events.h  18
-rw-r--r--  tools/perf/util/parse-events.l  56
-rw-r--r--  tools/perf/util/parse-events.y  125
-rw-r--r--  tools/perf/util/parse-options.c  3
-rw-r--r--  tools/perf/util/perf_regs.h  14
-rw-r--r--  tools/perf/util/pmu.c  80
-rw-r--r--  tools/perf/util/pmu.h  3
-rw-r--r--  tools/perf/util/pmu.y  6
-rw-r--r--  tools/perf/util/probe-event.c  69
-rw-r--r--  tools/perf/util/probe-finder.c  28
-rw-r--r--  tools/perf/util/python.c  17
-rw-r--r--  tools/perf/util/scripting-engines/trace-event-perl.c  50
-rw-r--r--  tools/perf/util/scripting-engines/trace-event-python.c  113
-rw-r--r--  tools/perf/util/session.c  198
-rw-r--r--  tools/perf/util/session.h  10
-rw-r--r--  tools/perf/util/sort.c  25
-rw-r--r--  tools/perf/util/sort.h  2
-rw-r--r--  tools/perf/util/stat.c  57
-rw-r--r--  tools/perf/util/stat.h  16
-rw-r--r--  tools/perf/util/string.c  18
-rw-r--r--  tools/perf/util/strlist.c  2
-rw-r--r--  tools/perf/util/symbol-elf.c  841
-rw-r--r--  tools/perf/util/symbol-minimal.c  307
-rw-r--r--  tools/perf/util/symbol.c  942
-rw-r--r--  tools/perf/util/symbol.h  67
-rw-r--r--  tools/perf/util/target.c  4
-rw-r--r--  tools/perf/util/thread.h  2
-rw-r--r--  tools/perf/util/top.c  3
-rw-r--r--  tools/perf/util/top.h  1
-rw-r--r--  tools/perf/util/trace-event-parse.c  54
-rw-r--r--  tools/perf/util/trace-event-scripting.c  34
-rw-r--r--  tools/perf/util/trace-event.h  12
-rw-r--r--  tools/perf/util/unwind.c  571
-rw-r--r--  tools/perf/util/unwind.h  35
-rw-r--r--  tools/perf/util/util.c  25
-rw-r--r--  tools/perf/util/util.h  9
-rw-r--r--  tools/perf/util/vdso.c  111
-rw-r--r--  tools/perf/util/vdso.h  18
-rw-r--r--  tools/perf/util/wrapper.c  3
-rw-r--r--  tools/scripts/Makefile.include  6
208 files changed, 13242 insertions, 5240 deletions
diff --git a/Makefile b/Makefile
index bb9fff26f07..846dd760785 100644
--- a/Makefile
+++ b/Makefile
@@ -609,7 +609,11 @@ KBUILD_CFLAGS += $(call cc-option, -femit-struct-debug-baseonly)
endif
ifdef CONFIG_FUNCTION_TRACER
-KBUILD_CFLAGS += -pg
+ifdef CONFIG_HAVE_FENTRY
+CC_USING_FENTRY := $(call cc-option, -mfentry -DCC_USING_FENTRY)
+endif
+KBUILD_CFLAGS += -pg $(CC_USING_FENTRY)
+KBUILD_AFLAGS += $(CC_USING_FENTRY)
ifdef CONFIG_DYNAMIC_FTRACE
ifdef CONFIG_HAVE_C_RECORDMCOUNT
BUILD_C_RECORDMCOUNT := y
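With -mfentry (CC_USING_FENTRY), the compiler places the hook call as the very first instruction of the function, before any frame setup; classic -pg emits the mcount call after the prologue. A rough illustration in C (the traced() function is hypothetical; exact code generation depends on the compiler):

    /* Any function built with the flags above is instrumented by the
     * compiler; no source change is needed. */
    static int traced(int x)
    {
            /*
             * -pg alone:      push %rbp; mov %rsp,%rbp; call mcount
             * -pg -mfentry:   call __fentry__   (before frame setup)
             *
             * Because __fentry__ runs before the frame exists, the
             * parent ip must be read from the stack (SS+16(%rsp) in
             * the entry_64.S changes below) rather than from 8(%rbp).
             */
            return x + 1;
    }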
diff --git a/arch/Kconfig b/arch/Kconfig
index 1401a758797..1a7b468abf4 100644
--- a/arch/Kconfig
+++ b/arch/Kconfig
@@ -222,6 +222,19 @@ config HAVE_PERF_EVENTS_NMI
subsystem. Also has support for calculating CPU cycle events
 to determine how many clock cycles elapsed in a given period.
+config HAVE_PERF_REGS
+ bool
+ help
+ Support selective register dumps for perf events. This includes
+ bit-mapping of each registers and a unique architecture id.
+
+config HAVE_PERF_USER_STACK_DUMP
+ bool
+ help
+ Support user stack dumps for perf event samples. This needs
+ access to the user stack pointer which is not unified across
+ architectures.
+
config HAVE_ARCH_JUMP_LABEL
bool
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index 20c49b8450b..8ff1f56a018 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -36,6 +36,7 @@ config X86
select HAVE_KRETPROBES
select HAVE_OPTPROBES
select HAVE_FTRACE_MCOUNT_RECORD
+ select HAVE_FENTRY if X86_64
select HAVE_C_RECORDMCOUNT
select HAVE_DYNAMIC_FTRACE
select HAVE_FUNCTION_TRACER
@@ -60,6 +61,8 @@ config X86
select HAVE_MIXED_BREAKPOINTS_REGS
select PERF_EVENTS
select HAVE_PERF_EVENTS_NMI
+ select HAVE_PERF_REGS
+ select HAVE_PERF_USER_STACK_DUMP
select ANON_INODES
select HAVE_ALIGNED_STRUCT_PAGE if SLUB && !M386
select HAVE_CMPXCHG_LOCAL if !M386
diff --git a/arch/x86/include/asm/ftrace.h b/arch/x86/include/asm/ftrace.h
index b0767bc0874..9a25b522d37 100644
--- a/arch/x86/include/asm/ftrace.h
+++ b/arch/x86/include/asm/ftrace.h
@@ -3,38 +3,54 @@
#ifdef __ASSEMBLY__
- .macro MCOUNT_SAVE_FRAME
- /* taken from glibc */
- subq $0x38, %rsp
- movq %rax, (%rsp)
- movq %rcx, 8(%rsp)
- movq %rdx, 16(%rsp)
- movq %rsi, 24(%rsp)
- movq %rdi, 32(%rsp)
- movq %r8, 40(%rsp)
- movq %r9, 48(%rsp)
+ /* skip is set if the stack was already partially adjusted */
+ .macro MCOUNT_SAVE_FRAME skip=0
+ /*
+ * We add enough stack to save all regs.
+ */
+ subq $(SS+8-\skip), %rsp
+ movq %rax, RAX(%rsp)
+ movq %rcx, RCX(%rsp)
+ movq %rdx, RDX(%rsp)
+ movq %rsi, RSI(%rsp)
+ movq %rdi, RDI(%rsp)
+ movq %r8, R8(%rsp)
+ movq %r9, R9(%rsp)
+ /* Move RIP to its proper location */
+ movq SS+8(%rsp), %rdx
+ movq %rdx, RIP(%rsp)
.endm
- .macro MCOUNT_RESTORE_FRAME
- movq 48(%rsp), %r9
- movq 40(%rsp), %r8
- movq 32(%rsp), %rdi
- movq 24(%rsp), %rsi
- movq 16(%rsp), %rdx
- movq 8(%rsp), %rcx
- movq (%rsp), %rax
- addq $0x38, %rsp
+ .macro MCOUNT_RESTORE_FRAME skip=0
+ movq R9(%rsp), %r9
+ movq R8(%rsp), %r8
+ movq RDI(%rsp), %rdi
+ movq RSI(%rsp), %rsi
+ movq RDX(%rsp), %rdx
+ movq RCX(%rsp), %rcx
+ movq RAX(%rsp), %rax
+ addq $(SS+8-\skip), %rsp
.endm
#endif
#ifdef CONFIG_FUNCTION_TRACER
-#define MCOUNT_ADDR ((long)(mcount))
+#ifdef CC_USING_FENTRY
+# define MCOUNT_ADDR ((long)(__fentry__))
+#else
+# define MCOUNT_ADDR ((long)(mcount))
+#endif
#define MCOUNT_INSN_SIZE 5 /* sizeof mcount call */
+#ifdef CONFIG_DYNAMIC_FTRACE
+#define ARCH_SUPPORTS_FTRACE_OPS 1
+#define ARCH_SUPPORTS_FTRACE_SAVE_REGS
+#endif
+
#ifndef __ASSEMBLY__
extern void mcount(void);
extern atomic_t modifying_ftrace_code;
+extern void __fentry__(void);
static inline unsigned long ftrace_call_adjust(unsigned long addr)
{
diff --git a/arch/x86/include/asm/kprobes.h b/arch/x86/include/asm/kprobes.h
index 54788253915..d3ddd17405d 100644
--- a/arch/x86/include/asm/kprobes.h
+++ b/arch/x86/include/asm/kprobes.h
@@ -27,6 +27,7 @@
#include <asm/insn.h>
#define __ARCH_WANT_KPROBES_INSN_SLOT
+#define ARCH_SUPPORTS_KPROBES_ON_FTRACE
struct pt_regs;
struct kprobe;
diff --git a/arch/x86/include/asm/kvm.h b/arch/x86/include/asm/kvm.h
index 246617efd67..41e08cb6a09 100644
--- a/arch/x86/include/asm/kvm.h
+++ b/arch/x86/include/asm/kvm.h
@@ -9,6 +9,22 @@
#include <linux/types.h>
#include <linux/ioctl.h>
+#define DE_VECTOR 0
+#define DB_VECTOR 1
+#define BP_VECTOR 3
+#define OF_VECTOR 4
+#define BR_VECTOR 5
+#define UD_VECTOR 6
+#define NM_VECTOR 7
+#define DF_VECTOR 8
+#define TS_VECTOR 10
+#define NP_VECTOR 11
+#define SS_VECTOR 12
+#define GP_VECTOR 13
+#define PF_VECTOR 14
+#define MF_VECTOR 16
+#define MC_VECTOR 18
+
/* Select x86 specific features in <linux/kvm.h> */
#define __KVM_HAVE_PIT
#define __KVM_HAVE_IOAPIC
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 09155d64cf7..1eaa6b05667 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -75,22 +75,6 @@
#define KVM_HPAGE_MASK(x) (~(KVM_HPAGE_SIZE(x) - 1))
#define KVM_PAGES_PER_HPAGE(x) (KVM_HPAGE_SIZE(x) / PAGE_SIZE)
-#define DE_VECTOR 0
-#define DB_VECTOR 1
-#define BP_VECTOR 3
-#define OF_VECTOR 4
-#define BR_VECTOR 5
-#define UD_VECTOR 6
-#define NM_VECTOR 7
-#define DF_VECTOR 8
-#define TS_VECTOR 10
-#define NP_VECTOR 11
-#define SS_VECTOR 12
-#define GP_VECTOR 13
-#define PF_VECTOR 14
-#define MF_VECTOR 16
-#define MC_VECTOR 18
-
#define SELECTOR_TI_MASK (1 << 2)
#define SELECTOR_RPL_MASK 0x03
diff --git a/arch/x86/include/asm/perf_event.h b/arch/x86/include/asm/perf_event.h
index cb4e43bce98..4fabcdf1cfa 100644
--- a/arch/x86/include/asm/perf_event.h
+++ b/arch/x86/include/asm/perf_event.h
@@ -262,4 +262,6 @@ static inline void perf_check_microcode(void) { }
static inline void amd_pmu_disable_virt(void) { }
#endif
+#define arch_perf_out_copy_user copy_from_user_nmi
+
#endif /* _ASM_X86_PERF_EVENT_H */
diff --git a/arch/x86/include/asm/perf_regs.h b/arch/x86/include/asm/perf_regs.h
new file mode 100644
index 00000000000..3f2207bfd17
--- /dev/null
+++ b/arch/x86/include/asm/perf_regs.h
@@ -0,0 +1,33 @@
+#ifndef _ASM_X86_PERF_REGS_H
+#define _ASM_X86_PERF_REGS_H
+
+enum perf_event_x86_regs {
+ PERF_REG_X86_AX,
+ PERF_REG_X86_BX,
+ PERF_REG_X86_CX,
+ PERF_REG_X86_DX,
+ PERF_REG_X86_SI,
+ PERF_REG_X86_DI,
+ PERF_REG_X86_BP,
+ PERF_REG_X86_SP,
+ PERF_REG_X86_IP,
+ PERF_REG_X86_FLAGS,
+ PERF_REG_X86_CS,
+ PERF_REG_X86_SS,
+ PERF_REG_X86_DS,
+ PERF_REG_X86_ES,
+ PERF_REG_X86_FS,
+ PERF_REG_X86_GS,
+ PERF_REG_X86_R8,
+ PERF_REG_X86_R9,
+ PERF_REG_X86_R10,
+ PERF_REG_X86_R11,
+ PERF_REG_X86_R12,
+ PERF_REG_X86_R13,
+ PERF_REG_X86_R14,
+ PERF_REG_X86_R15,
+
+ PERF_REG_X86_32_MAX = PERF_REG_X86_GS + 1,
+ PERF_REG_X86_64_MAX = PERF_REG_X86_R15 + 1,
+};
+#endif /* _ASM_X86_PERF_REGS_H */
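Each enumerator above doubles as a bit position in the register mask that userspace supplies when it asks for register samples (the sample_regs_user field this series adds to perf_event_attr). A minimal sketch, assuming that field:

    #include <asm/perf_regs.h>

    /* Build a mask requesting ip, sp and bp in each sample; the
     * kernel rejects masks with reserved or unsupported bits via
     * perf_reg_validate(). */
    static inline unsigned long long sample_regs_mask(void)
    {
            return (1ULL << PERF_REG_X86_IP) |
                   (1ULL << PERF_REG_X86_SP) |
                   (1ULL << PERF_REG_X86_BP);
    }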
diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h
index d048cad9bca..433d2e5c98a 100644
--- a/arch/x86/include/asm/processor.h
+++ b/arch/x86/include/asm/processor.h
@@ -759,6 +759,8 @@ static inline void update_debugctlmsr(unsigned long debugctlmsr)
wrmsrl(MSR_IA32_DEBUGCTLMSR, debugctlmsr);
}
+extern void set_task_blockstep(struct task_struct *task, bool on);
+
/*
* from system description table in BIOS. Mostly for MCA use, but
* others may find it useful:
diff --git a/arch/x86/include/asm/svm.h b/arch/x86/include/asm/svm.h
index f2b83bc7d78..cdf5674dd23 100644
--- a/arch/x86/include/asm/svm.h
+++ b/arch/x86/include/asm/svm.h
@@ -1,6 +1,135 @@
#ifndef __SVM_H
#define __SVM_H
+#define SVM_EXIT_READ_CR0 0x000
+#define SVM_EXIT_READ_CR3 0x003
+#define SVM_EXIT_READ_CR4 0x004
+#define SVM_EXIT_READ_CR8 0x008
+#define SVM_EXIT_WRITE_CR0 0x010
+#define SVM_EXIT_WRITE_CR3 0x013
+#define SVM_EXIT_WRITE_CR4 0x014
+#define SVM_EXIT_WRITE_CR8 0x018
+#define SVM_EXIT_READ_DR0 0x020
+#define SVM_EXIT_READ_DR1 0x021
+#define SVM_EXIT_READ_DR2 0x022
+#define SVM_EXIT_READ_DR3 0x023
+#define SVM_EXIT_READ_DR4 0x024
+#define SVM_EXIT_READ_DR5 0x025
+#define SVM_EXIT_READ_DR6 0x026
+#define SVM_EXIT_READ_DR7 0x027
+#define SVM_EXIT_WRITE_DR0 0x030
+#define SVM_EXIT_WRITE_DR1 0x031
+#define SVM_EXIT_WRITE_DR2 0x032
+#define SVM_EXIT_WRITE_DR3 0x033
+#define SVM_EXIT_WRITE_DR4 0x034
+#define SVM_EXIT_WRITE_DR5 0x035
+#define SVM_EXIT_WRITE_DR6 0x036
+#define SVM_EXIT_WRITE_DR7 0x037
+#define SVM_EXIT_EXCP_BASE 0x040
+#define SVM_EXIT_INTR 0x060
+#define SVM_EXIT_NMI 0x061
+#define SVM_EXIT_SMI 0x062
+#define SVM_EXIT_INIT 0x063
+#define SVM_EXIT_VINTR 0x064
+#define SVM_EXIT_CR0_SEL_WRITE 0x065
+#define SVM_EXIT_IDTR_READ 0x066
+#define SVM_EXIT_GDTR_READ 0x067
+#define SVM_EXIT_LDTR_READ 0x068
+#define SVM_EXIT_TR_READ 0x069
+#define SVM_EXIT_IDTR_WRITE 0x06a
+#define SVM_EXIT_GDTR_WRITE 0x06b
+#define SVM_EXIT_LDTR_WRITE 0x06c
+#define SVM_EXIT_TR_WRITE 0x06d
+#define SVM_EXIT_RDTSC 0x06e
+#define SVM_EXIT_RDPMC 0x06f
+#define SVM_EXIT_PUSHF 0x070
+#define SVM_EXIT_POPF 0x071
+#define SVM_EXIT_CPUID 0x072
+#define SVM_EXIT_RSM 0x073
+#define SVM_EXIT_IRET 0x074
+#define SVM_EXIT_SWINT 0x075
+#define SVM_EXIT_INVD 0x076
+#define SVM_EXIT_PAUSE 0x077
+#define SVM_EXIT_HLT 0x078
+#define SVM_EXIT_INVLPG 0x079
+#define SVM_EXIT_INVLPGA 0x07a
+#define SVM_EXIT_IOIO 0x07b
+#define SVM_EXIT_MSR 0x07c
+#define SVM_EXIT_TASK_SWITCH 0x07d
+#define SVM_EXIT_FERR_FREEZE 0x07e
+#define SVM_EXIT_SHUTDOWN 0x07f
+#define SVM_EXIT_VMRUN 0x080
+#define SVM_EXIT_VMMCALL 0x081
+#define SVM_EXIT_VMLOAD 0x082
+#define SVM_EXIT_VMSAVE 0x083
+#define SVM_EXIT_STGI 0x084
+#define SVM_EXIT_CLGI 0x085
+#define SVM_EXIT_SKINIT 0x086
+#define SVM_EXIT_RDTSCP 0x087
+#define SVM_EXIT_ICEBP 0x088
+#define SVM_EXIT_WBINVD 0x089
+#define SVM_EXIT_MONITOR 0x08a
+#define SVM_EXIT_MWAIT 0x08b
+#define SVM_EXIT_MWAIT_COND 0x08c
+#define SVM_EXIT_XSETBV 0x08d
+#define SVM_EXIT_NPF 0x400
+
+#define SVM_EXIT_ERR -1
+
+#define SVM_EXIT_REASONS \
+ { SVM_EXIT_READ_CR0, "read_cr0" }, \
+ { SVM_EXIT_READ_CR3, "read_cr3" }, \
+ { SVM_EXIT_READ_CR4, "read_cr4" }, \
+ { SVM_EXIT_READ_CR8, "read_cr8" }, \
+ { SVM_EXIT_WRITE_CR0, "write_cr0" }, \
+ { SVM_EXIT_WRITE_CR3, "write_cr3" }, \
+ { SVM_EXIT_WRITE_CR4, "write_cr4" }, \
+ { SVM_EXIT_WRITE_CR8, "write_cr8" }, \
+ { SVM_EXIT_READ_DR0, "read_dr0" }, \
+ { SVM_EXIT_READ_DR1, "read_dr1" }, \
+ { SVM_EXIT_READ_DR2, "read_dr2" }, \
+ { SVM_EXIT_READ_DR3, "read_dr3" }, \
+ { SVM_EXIT_WRITE_DR0, "write_dr0" }, \
+ { SVM_EXIT_WRITE_DR1, "write_dr1" }, \
+ { SVM_EXIT_WRITE_DR2, "write_dr2" }, \
+ { SVM_EXIT_WRITE_DR3, "write_dr3" }, \
+ { SVM_EXIT_WRITE_DR5, "write_dr5" }, \
+ { SVM_EXIT_WRITE_DR7, "write_dr7" }, \
+ { SVM_EXIT_EXCP_BASE + DB_VECTOR, "DB excp" }, \
+ { SVM_EXIT_EXCP_BASE + BP_VECTOR, "BP excp" }, \
+ { SVM_EXIT_EXCP_BASE + UD_VECTOR, "UD excp" }, \
+ { SVM_EXIT_EXCP_BASE + PF_VECTOR, "PF excp" }, \
+ { SVM_EXIT_EXCP_BASE + NM_VECTOR, "NM excp" }, \
+ { SVM_EXIT_EXCP_BASE + MC_VECTOR, "MC excp" }, \
+ { SVM_EXIT_INTR, "interrupt" }, \
+ { SVM_EXIT_NMI, "nmi" }, \
+ { SVM_EXIT_SMI, "smi" }, \
+ { SVM_EXIT_INIT, "init" }, \
+ { SVM_EXIT_VINTR, "vintr" }, \
+ { SVM_EXIT_CPUID, "cpuid" }, \
+ { SVM_EXIT_INVD, "invd" }, \
+ { SVM_EXIT_HLT, "hlt" }, \
+ { SVM_EXIT_INVLPG, "invlpg" }, \
+ { SVM_EXIT_INVLPGA, "invlpga" }, \
+ { SVM_EXIT_IOIO, "io" }, \
+ { SVM_EXIT_MSR, "msr" }, \
+ { SVM_EXIT_TASK_SWITCH, "task_switch" }, \
+ { SVM_EXIT_SHUTDOWN, "shutdown" }, \
+ { SVM_EXIT_VMRUN, "vmrun" }, \
+ { SVM_EXIT_VMMCALL, "hypercall" }, \
+ { SVM_EXIT_VMLOAD, "vmload" }, \
+ { SVM_EXIT_VMSAVE, "vmsave" }, \
+ { SVM_EXIT_STGI, "stgi" }, \
+ { SVM_EXIT_CLGI, "clgi" }, \
+ { SVM_EXIT_SKINIT, "skinit" }, \
+ { SVM_EXIT_WBINVD, "wbinvd" }, \
+ { SVM_EXIT_MONITOR, "monitor" }, \
+ { SVM_EXIT_MWAIT, "mwait" }, \
+ { SVM_EXIT_XSETBV, "xsetbv" }, \
+ { SVM_EXIT_NPF, "npf" }
+
+#ifdef __KERNEL__
+
enum {
INTERCEPT_INTR,
INTERCEPT_NMI,
@@ -264,81 +393,6 @@ struct __attribute__ ((__packed__)) vmcb {
#define SVM_EXITINFO_REG_MASK 0x0F
-#define SVM_EXIT_READ_CR0 0x000
-#define SVM_EXIT_READ_CR3 0x003
-#define SVM_EXIT_READ_CR4 0x004
-#define SVM_EXIT_READ_CR8 0x008
-#define SVM_EXIT_WRITE_CR0 0x010
-#define SVM_EXIT_WRITE_CR3 0x013
-#define SVM_EXIT_WRITE_CR4 0x014
-#define SVM_EXIT_WRITE_CR8 0x018
-#define SVM_EXIT_READ_DR0 0x020
-#define SVM_EXIT_READ_DR1 0x021
-#define SVM_EXIT_READ_DR2 0x022
-#define SVM_EXIT_READ_DR3 0x023
-#define SVM_EXIT_READ_DR4 0x024
-#define SVM_EXIT_READ_DR5 0x025
-#define SVM_EXIT_READ_DR6 0x026
-#define SVM_EXIT_READ_DR7 0x027
-#define SVM_EXIT_WRITE_DR0 0x030
-#define SVM_EXIT_WRITE_DR1 0x031
-#define SVM_EXIT_WRITE_DR2 0x032
-#define SVM_EXIT_WRITE_DR3 0x033
-#define SVM_EXIT_WRITE_DR4 0x034
-#define SVM_EXIT_WRITE_DR5 0x035
-#define SVM_EXIT_WRITE_DR6 0x036
-#define SVM_EXIT_WRITE_DR7 0x037
-#define SVM_EXIT_EXCP_BASE 0x040
-#define SVM_EXIT_INTR 0x060
-#define SVM_EXIT_NMI 0x061
-#define SVM_EXIT_SMI 0x062
-#define SVM_EXIT_INIT 0x063
-#define SVM_EXIT_VINTR 0x064
-#define SVM_EXIT_CR0_SEL_WRITE 0x065
-#define SVM_EXIT_IDTR_READ 0x066
-#define SVM_EXIT_GDTR_READ 0x067
-#define SVM_EXIT_LDTR_READ 0x068
-#define SVM_EXIT_TR_READ 0x069
-#define SVM_EXIT_IDTR_WRITE 0x06a
-#define SVM_EXIT_GDTR_WRITE 0x06b
-#define SVM_EXIT_LDTR_WRITE 0x06c
-#define SVM_EXIT_TR_WRITE 0x06d
-#define SVM_EXIT_RDTSC 0x06e
-#define SVM_EXIT_RDPMC 0x06f
-#define SVM_EXIT_PUSHF 0x070
-#define SVM_EXIT_POPF 0x071
-#define SVM_EXIT_CPUID 0x072
-#define SVM_EXIT_RSM 0x073
-#define SVM_EXIT_IRET 0x074
-#define SVM_EXIT_SWINT 0x075
-#define SVM_EXIT_INVD 0x076
-#define SVM_EXIT_PAUSE 0x077
-#define SVM_EXIT_HLT 0x078
-#define SVM_EXIT_INVLPG 0x079
-#define SVM_EXIT_INVLPGA 0x07a
-#define SVM_EXIT_IOIO 0x07b
-#define SVM_EXIT_MSR 0x07c
-#define SVM_EXIT_TASK_SWITCH 0x07d
-#define SVM_EXIT_FERR_FREEZE 0x07e
-#define SVM_EXIT_SHUTDOWN 0x07f
-#define SVM_EXIT_VMRUN 0x080
-#define SVM_EXIT_VMMCALL 0x081
-#define SVM_EXIT_VMLOAD 0x082
-#define SVM_EXIT_VMSAVE 0x083
-#define SVM_EXIT_STGI 0x084
-#define SVM_EXIT_CLGI 0x085
-#define SVM_EXIT_SKINIT 0x086
-#define SVM_EXIT_RDTSCP 0x087
-#define SVM_EXIT_ICEBP 0x088
-#define SVM_EXIT_WBINVD 0x089
-#define SVM_EXIT_MONITOR 0x08a
-#define SVM_EXIT_MWAIT 0x08b
-#define SVM_EXIT_MWAIT_COND 0x08c
-#define SVM_EXIT_XSETBV 0x08d
-#define SVM_EXIT_NPF 0x400
-
-#define SVM_EXIT_ERR -1
-
#define SVM_CR0_SELECTIVE_MASK (X86_CR0_TS | X86_CR0_MP)
#define SVM_VMLOAD ".byte 0x0f, 0x01, 0xda"
@@ -350,3 +404,4 @@ struct __attribute__ ((__packed__)) vmcb {
#endif
+#endif
diff --git a/arch/x86/include/asm/uprobes.h b/arch/x86/include/asm/uprobes.h
index f3971bbcd1d..8ff8be7835a 100644
--- a/arch/x86/include/asm/uprobes.h
+++ b/arch/x86/include/asm/uprobes.h
@@ -42,10 +42,11 @@ struct arch_uprobe {
};
struct arch_uprobe_task {
- unsigned long saved_trap_nr;
#ifdef CONFIG_X86_64
unsigned long saved_scratch_register;
#endif
+ unsigned int saved_trap_nr;
+ unsigned int saved_tf;
};
extern int arch_uprobe_analyze_insn(struct arch_uprobe *aup, struct mm_struct *mm, unsigned long addr);
diff --git a/arch/x86/include/asm/vmx.h b/arch/x86/include/asm/vmx.h
index 74fcb963595..36ec21c36d6 100644
--- a/arch/x86/include/asm/vmx.h
+++ b/arch/x86/include/asm/vmx.h
@@ -25,6 +25,88 @@
*
*/
+#define VMX_EXIT_REASONS_FAILED_VMENTRY 0x80000000
+
+#define EXIT_REASON_EXCEPTION_NMI 0
+#define EXIT_REASON_EXTERNAL_INTERRUPT 1
+#define EXIT_REASON_TRIPLE_FAULT 2
+
+#define EXIT_REASON_PENDING_INTERRUPT 7
+#define EXIT_REASON_NMI_WINDOW 8
+#define EXIT_REASON_TASK_SWITCH 9
+#define EXIT_REASON_CPUID 10
+#define EXIT_REASON_HLT 12
+#define EXIT_REASON_INVD 13
+#define EXIT_REASON_INVLPG 14
+#define EXIT_REASON_RDPMC 15
+#define EXIT_REASON_RDTSC 16
+#define EXIT_REASON_VMCALL 18
+#define EXIT_REASON_VMCLEAR 19
+#define EXIT_REASON_VMLAUNCH 20
+#define EXIT_REASON_VMPTRLD 21
+#define EXIT_REASON_VMPTRST 22
+#define EXIT_REASON_VMREAD 23
+#define EXIT_REASON_VMRESUME 24
+#define EXIT_REASON_VMWRITE 25
+#define EXIT_REASON_VMOFF 26
+#define EXIT_REASON_VMON 27
+#define EXIT_REASON_CR_ACCESS 28
+#define EXIT_REASON_DR_ACCESS 29
+#define EXIT_REASON_IO_INSTRUCTION 30
+#define EXIT_REASON_MSR_READ 31
+#define EXIT_REASON_MSR_WRITE 32
+#define EXIT_REASON_INVALID_STATE 33
+#define EXIT_REASON_MWAIT_INSTRUCTION 36
+#define EXIT_REASON_MONITOR_INSTRUCTION 39
+#define EXIT_REASON_PAUSE_INSTRUCTION 40
+#define EXIT_REASON_MCE_DURING_VMENTRY 41
+#define EXIT_REASON_TPR_BELOW_THRESHOLD 43
+#define EXIT_REASON_APIC_ACCESS 44
+#define EXIT_REASON_EPT_VIOLATION 48
+#define EXIT_REASON_EPT_MISCONFIG 49
+#define EXIT_REASON_WBINVD 54
+#define EXIT_REASON_XSETBV 55
+#define EXIT_REASON_INVPCID 58
+
+#define VMX_EXIT_REASONS \
+ { EXIT_REASON_EXCEPTION_NMI, "EXCEPTION_NMI" }, \
+ { EXIT_REASON_EXTERNAL_INTERRUPT, "EXTERNAL_INTERRUPT" }, \
+ { EXIT_REASON_TRIPLE_FAULT, "TRIPLE_FAULT" }, \
+ { EXIT_REASON_PENDING_INTERRUPT, "PENDING_INTERRUPT" }, \
+ { EXIT_REASON_NMI_WINDOW, "NMI_WINDOW" }, \
+ { EXIT_REASON_TASK_SWITCH, "TASK_SWITCH" }, \
+ { EXIT_REASON_CPUID, "CPUID" }, \
+ { EXIT_REASON_HLT, "HLT" }, \
+ { EXIT_REASON_INVLPG, "INVLPG" }, \
+ { EXIT_REASON_RDPMC, "RDPMC" }, \
+ { EXIT_REASON_RDTSC, "RDTSC" }, \
+ { EXIT_REASON_VMCALL, "VMCALL" }, \
+ { EXIT_REASON_VMCLEAR, "VMCLEAR" }, \
+ { EXIT_REASON_VMLAUNCH, "VMLAUNCH" }, \
+ { EXIT_REASON_VMPTRLD, "VMPTRLD" }, \
+ { EXIT_REASON_VMPTRST, "VMPTRST" }, \
+ { EXIT_REASON_VMREAD, "VMREAD" }, \
+ { EXIT_REASON_VMRESUME, "VMRESUME" }, \
+ { EXIT_REASON_VMWRITE, "VMWRITE" }, \
+ { EXIT_REASON_VMOFF, "VMOFF" }, \
+ { EXIT_REASON_VMON, "VMON" }, \
+ { EXIT_REASON_CR_ACCESS, "CR_ACCESS" }, \
+ { EXIT_REASON_DR_ACCESS, "DR_ACCESS" }, \
+ { EXIT_REASON_IO_INSTRUCTION, "IO_INSTRUCTION" }, \
+ { EXIT_REASON_MSR_READ, "MSR_READ" }, \
+ { EXIT_REASON_MSR_WRITE, "MSR_WRITE" }, \
+ { EXIT_REASON_MWAIT_INSTRUCTION, "MWAIT_INSTRUCTION" }, \
+ { EXIT_REASON_MONITOR_INSTRUCTION, "MONITOR_INSTRUCTION" }, \
+ { EXIT_REASON_PAUSE_INSTRUCTION, "PAUSE_INSTRUCTION" }, \
+ { EXIT_REASON_MCE_DURING_VMENTRY, "MCE_DURING_VMENTRY" }, \
+ { EXIT_REASON_TPR_BELOW_THRESHOLD, "TPR_BELOW_THRESHOLD" }, \
+ { EXIT_REASON_APIC_ACCESS, "APIC_ACCESS" }, \
+ { EXIT_REASON_EPT_VIOLATION, "EPT_VIOLATION" }, \
+ { EXIT_REASON_EPT_MISCONFIG, "EPT_MISCONFIG" }, \
+ { EXIT_REASON_WBINVD, "WBINVD" }
+
+#ifdef __KERNEL__
+
#include <linux/types.h>
/*
@@ -241,49 +323,6 @@ enum vmcs_field {
HOST_RIP = 0x00006c16,
};
-#define VMX_EXIT_REASONS_FAILED_VMENTRY 0x80000000
-
-#define EXIT_REASON_EXCEPTION_NMI 0
-#define EXIT_REASON_EXTERNAL_INTERRUPT 1
-#define EXIT_REASON_TRIPLE_FAULT 2
-
-#define EXIT_REASON_PENDING_INTERRUPT 7
-#define EXIT_REASON_NMI_WINDOW 8
-#define EXIT_REASON_TASK_SWITCH 9
-#define EXIT_REASON_CPUID 10
-#define EXIT_REASON_HLT 12
-#define EXIT_REASON_INVD 13
-#define EXIT_REASON_INVLPG 14
-#define EXIT_REASON_RDPMC 15
-#define EXIT_REASON_RDTSC 16
-#define EXIT_REASON_VMCALL 18
-#define EXIT_REASON_VMCLEAR 19
-#define EXIT_REASON_VMLAUNCH 20
-#define EXIT_REASON_VMPTRLD 21
-#define EXIT_REASON_VMPTRST 22
-#define EXIT_REASON_VMREAD 23
-#define EXIT_REASON_VMRESUME 24
-#define EXIT_REASON_VMWRITE 25
-#define EXIT_REASON_VMOFF 26
-#define EXIT_REASON_VMON 27
-#define EXIT_REASON_CR_ACCESS 28
-#define EXIT_REASON_DR_ACCESS 29
-#define EXIT_REASON_IO_INSTRUCTION 30
-#define EXIT_REASON_MSR_READ 31
-#define EXIT_REASON_MSR_WRITE 32
-#define EXIT_REASON_INVALID_STATE 33
-#define EXIT_REASON_MWAIT_INSTRUCTION 36
-#define EXIT_REASON_MONITOR_INSTRUCTION 39
-#define EXIT_REASON_PAUSE_INSTRUCTION 40
-#define EXIT_REASON_MCE_DURING_VMENTRY 41
-#define EXIT_REASON_TPR_BELOW_THRESHOLD 43
-#define EXIT_REASON_APIC_ACCESS 44
-#define EXIT_REASON_EPT_VIOLATION 48
-#define EXIT_REASON_EPT_MISCONFIG 49
-#define EXIT_REASON_WBINVD 54
-#define EXIT_REASON_XSETBV 55
-#define EXIT_REASON_INVPCID 58
-
/*
* Interruption-information format
*/
@@ -488,3 +527,5 @@ enum vm_instruction_error_number {
};
#endif
+
+#endif
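Moving the exit-reason constants and the VMX_EXIT_REASONS table above the __KERNEL__ guard lets userspace (e.g. the new perf kvm code) include this header. The table expands to brace-initializer pairs, so it can back a simple lookup; a sketch of one way a tool might consume it (the helper below is hypothetical, not part of this series):

    #include <asm/vmx.h>

    struct reason_name {
            unsigned long val;
            const char *name;
    };

    /* VMX_EXIT_REASONS expands to { val, "NAME" }, ... pairs */
    static const struct reason_name vmx_names[] = { VMX_EXIT_REASONS };

    static const char *vmx_exit_name(unsigned long reason)
    {
            unsigned int i;

            for (i = 0; i < sizeof(vmx_names) / sizeof(vmx_names[0]); i++)
                    if (vmx_names[i].val == reason)
                            return vmx_names[i].name;
            return "UNKNOWN";
    }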
diff --git a/arch/x86/kernel/Makefile b/arch/x86/kernel/Makefile
index 8215e5652d9..8d7a619718b 100644
--- a/arch/x86/kernel/Makefile
+++ b/arch/x86/kernel/Makefile
@@ -100,6 +100,8 @@ obj-$(CONFIG_SWIOTLB) += pci-swiotlb.o
obj-$(CONFIG_OF) += devicetree.o
obj-$(CONFIG_UPROBES) += uprobes.o
+obj-$(CONFIG_PERF_EVENTS) += perf_regs.o
+
###
# 64 bit specific files
ifeq ($(CONFIG_X86_64),y)
diff --git a/arch/x86/kernel/cpu/perf_event_intel_uncore.c b/arch/x86/kernel/cpu/perf_event_intel_uncore.c
index 38e4894165b..db917ec8904 100644
--- a/arch/x86/kernel/cpu/perf_event_intel_uncore.c
+++ b/arch/x86/kernel/cpu/perf_event_intel_uncore.c
@@ -2347,6 +2347,27 @@ int uncore_pmu_event_init(struct perf_event *event)
return ret;
}
+static ssize_t uncore_get_attr_cpumask(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ int n = cpulist_scnprintf(buf, PAGE_SIZE - 2, &uncore_cpu_mask);
+
+ buf[n++] = '\n';
+ buf[n] = '\0';
+ return n;
+}
+
+static DEVICE_ATTR(cpumask, S_IRUGO, uncore_get_attr_cpumask, NULL);
+
+static struct attribute *uncore_pmu_attrs[] = {
+ &dev_attr_cpumask.attr,
+ NULL,
+};
+
+static struct attribute_group uncore_pmu_attr_group = {
+ .attrs = uncore_pmu_attrs,
+};
+
static int __init uncore_pmu_register(struct intel_uncore_pmu *pmu)
{
int ret;
@@ -2384,8 +2405,8 @@ static void __init uncore_type_exit(struct intel_uncore_type *type)
free_percpu(type->pmus[i].box);
kfree(type->pmus);
type->pmus = NULL;
- kfree(type->attr_groups[1]);
- type->attr_groups[1] = NULL;
+ kfree(type->events_group);
+ type->events_group = NULL;
}
static void __init uncore_types_exit(struct intel_uncore_type **types)
@@ -2437,9 +2458,10 @@ static int __init uncore_type_init(struct intel_uncore_type *type)
for (j = 0; j < i; j++)
attrs[j] = &type->event_descs[j].attr.attr;
- type->attr_groups[1] = events_group;
+ type->events_group = events_group;
}
+ type->pmu_group = &uncore_pmu_attr_group;
type->pmus = pmus;
return 0;
fail:
diff --git a/arch/x86/kernel/cpu/perf_event_intel_uncore.h b/arch/x86/kernel/cpu/perf_event_intel_uncore.h
index 5b81c1856aa..e68a4550e95 100644
--- a/arch/x86/kernel/cpu/perf_event_intel_uncore.h
+++ b/arch/x86/kernel/cpu/perf_event_intel_uncore.h
@@ -369,10 +369,12 @@ struct intel_uncore_type {
struct intel_uncore_pmu *pmus;
struct intel_uncore_ops *ops;
struct uncore_event_desc *event_descs;
- const struct attribute_group *attr_groups[3];
+ const struct attribute_group *attr_groups[4];
};
-#define format_group attr_groups[0]
+#define pmu_group attr_groups[0]
+#define format_group attr_groups[1]
+#define events_group attr_groups[2]
struct intel_uncore_ops {
void (*init_box)(struct intel_uncore_box *);
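The new pmu_group exports a cpumask attribute so tools can learn which single CPU per socket should be used to program uncore events. A sketch of reading it from userspace (the PMU name and exact sysfs path are assumptions; uncore PMUs register under /sys/bus/event_source/devices/):

    #include <stdio.h>

    /* Hypothetical reader for the new cpumask attribute. */
    static int read_uncore_cpumask(const char *pmu, char *buf, int len)
    {
            char path[256];
            FILE *f;

            snprintf(path, sizeof(path),
                     "/sys/bus/event_source/devices/%s/cpumask", pmu);
            f = fopen(path, "r");
            if (!f)
                    return -1;
            if (!fgets(buf, len, f)) {
                    fclose(f);
                    return -1;
            }
            fclose(f);
            return 0;
    }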
diff --git a/arch/x86/kernel/entry_32.S b/arch/x86/kernel/entry_32.S
index 623f2883747..f438a44bf8f 100644
--- a/arch/x86/kernel/entry_32.S
+++ b/arch/x86/kernel/entry_32.S
@@ -1109,17 +1109,21 @@ ENTRY(ftrace_caller)
pushl %eax
pushl %ecx
pushl %edx
- movl 0xc(%esp), %eax
+ pushl $0 /* Pass NULL as regs pointer */
+ movl 4*4(%esp), %eax
movl 0x4(%ebp), %edx
+ leal function_trace_op, %ecx
subl $MCOUNT_INSN_SIZE, %eax
.globl ftrace_call
ftrace_call:
call ftrace_stub
+ addl $4,%esp /* skip NULL pointer */
popl %edx
popl %ecx
popl %eax
+ftrace_ret:
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
.globl ftrace_graph_call
ftrace_graph_call:
@@ -1131,6 +1135,71 @@ ftrace_stub:
ret
END(ftrace_caller)
+ENTRY(ftrace_regs_caller)
+ pushf /* push flags before compare (in cs location) */
+ cmpl $0, function_trace_stop
+ jne ftrace_restore_flags
+
+ /*
+ * i386 does not save SS and ESP when coming from kernel.
+ * Instead, to get sp, &regs->sp is used (see ptrace.h).
+ * Unfortunately, that means eflags must be at the same location
+ * as the current return ip is. We move the return ip into the
+ * ip location, and move flags into the return ip location.
+ */
+ pushl 4(%esp) /* save return ip into ip slot */
+
+ pushl $0 /* Load 0 into orig_ax */
+ pushl %gs
+ pushl %fs
+ pushl %es
+ pushl %ds
+ pushl %eax
+ pushl %ebp
+ pushl %edi
+ pushl %esi
+ pushl %edx
+ pushl %ecx
+ pushl %ebx
+
+ movl 13*4(%esp), %eax /* Get the saved flags */
+ movl %eax, 14*4(%esp) /* Move saved flags into regs->flags location */
+ /* clobbering return ip */
+ movl $__KERNEL_CS,13*4(%esp)
+
+ movl 12*4(%esp), %eax /* Load ip (1st parameter) */
+ subl $MCOUNT_INSN_SIZE, %eax /* Adjust ip */
+ movl 0x4(%ebp), %edx /* Load parent ip (2nd parameter) */
+ leal function_trace_op, %ecx /* Save ftrace_pos in 3rd parameter */
+ pushl %esp /* Save pt_regs as 4th parameter */
+
+GLOBAL(ftrace_regs_call)
+ call ftrace_stub
+
+ addl $4, %esp /* Skip pt_regs */
+ movl 14*4(%esp), %eax /* Move flags back into cs */
+ movl %eax, 13*4(%esp) /* Needed to keep addl from modifying flags */
+ movl 12*4(%esp), %eax /* Get return ip from regs->ip */
+ movl %eax, 14*4(%esp) /* Put return ip back for ret */
+
+ popl %ebx
+ popl %ecx
+ popl %edx
+ popl %esi
+ popl %edi
+ popl %ebp
+ popl %eax
+ popl %ds
+ popl %es
+ popl %fs
+ popl %gs
+ addl $8, %esp /* Skip orig_ax and ip */
+ popf /* Pop flags at end (no addl to corrupt flags) */
+ jmp ftrace_ret
+
+ftrace_restore_flags:
+ popf
+ jmp ftrace_stub
#else /* ! CONFIG_DYNAMIC_FTRACE */
ENTRY(mcount)
@@ -1171,9 +1240,6 @@ END(mcount)
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
ENTRY(ftrace_graph_caller)
- cmpl $0, function_trace_stop
- jne ftrace_stub
-
pushl %eax
pushl %ecx
pushl %edx
diff --git a/arch/x86/kernel/entry_64.S b/arch/x86/kernel/entry_64.S
index 1a8f3cbb6ee..4f0322e4ece 100644
--- a/arch/x86/kernel/entry_64.S
+++ b/arch/x86/kernel/entry_64.S
@@ -69,25 +69,51 @@
.section .entry.text, "ax"
#ifdef CONFIG_FUNCTION_TRACER
+
+#ifdef CC_USING_FENTRY
+# define function_hook __fentry__
+#else
+# define function_hook mcount
+#endif
+
#ifdef CONFIG_DYNAMIC_FTRACE
-ENTRY(mcount)
+
+ENTRY(function_hook)
retq
-END(mcount)
+END(function_hook)
+
+/* skip is set if stack has been adjusted */
+.macro ftrace_caller_setup skip=0
+ MCOUNT_SAVE_FRAME \skip
+
+ /* Load the ftrace_ops into the 3rd parameter */
+ leaq function_trace_op, %rdx
+
+ /* Load ip into the first parameter */
+ movq RIP(%rsp), %rdi
+ subq $MCOUNT_INSN_SIZE, %rdi
+ /* Load the parent_ip into the second parameter */
+#ifdef CC_USING_FENTRY
+ movq SS+16(%rsp), %rsi
+#else
+ movq 8(%rbp), %rsi
+#endif
+.endm
ENTRY(ftrace_caller)
+ /* Check if tracing was disabled (quick check) */
cmpl $0, function_trace_stop
jne ftrace_stub
- MCOUNT_SAVE_FRAME
-
- movq 0x38(%rsp), %rdi
- movq 8(%rbp), %rsi
- subq $MCOUNT_INSN_SIZE, %rdi
+ ftrace_caller_setup
+ /* regs go into 4th parameter (but make it NULL) */
+ movq $0, %rcx
GLOBAL(ftrace_call)
call ftrace_stub
MCOUNT_RESTORE_FRAME
+ftrace_return:
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
GLOBAL(ftrace_graph_call)
@@ -98,8 +124,78 @@ GLOBAL(ftrace_stub)
retq
END(ftrace_caller)
+ENTRY(ftrace_regs_caller)
+	/* Save the current flags before compare (in SS location) */
+ pushfq
+
+ /* Check if tracing was disabled (quick check) */
+ cmpl $0, function_trace_stop
+ jne ftrace_restore_flags
+
+ /* skip=8 to skip flags saved in SS */
+ ftrace_caller_setup 8
+
+ /* Save the rest of pt_regs */
+ movq %r15, R15(%rsp)
+ movq %r14, R14(%rsp)
+ movq %r13, R13(%rsp)
+ movq %r12, R12(%rsp)
+ movq %r11, R11(%rsp)
+ movq %r10, R10(%rsp)
+ movq %rbp, RBP(%rsp)
+ movq %rbx, RBX(%rsp)
+ /* Copy saved flags */
+ movq SS(%rsp), %rcx
+ movq %rcx, EFLAGS(%rsp)
+ /* Kernel segments */
+ movq $__KERNEL_DS, %rcx
+ movq %rcx, SS(%rsp)
+ movq $__KERNEL_CS, %rcx
+ movq %rcx, CS(%rsp)
+ /* Stack - skipping return address */
+ leaq SS+16(%rsp), %rcx
+ movq %rcx, RSP(%rsp)
+
+ /* regs go into 4th parameter */
+ leaq (%rsp), %rcx
+
+GLOBAL(ftrace_regs_call)
+ call ftrace_stub
+
+ /* Copy flags back to SS, to restore them */
+ movq EFLAGS(%rsp), %rax
+ movq %rax, SS(%rsp)
+
+ /* Handlers can change the RIP */
+ movq RIP(%rsp), %rax
+ movq %rax, SS+8(%rsp)
+
+ /* restore the rest of pt_regs */
+ movq R15(%rsp), %r15
+ movq R14(%rsp), %r14
+ movq R13(%rsp), %r13
+ movq R12(%rsp), %r12
+ movq R10(%rsp), %r10
+ movq RBP(%rsp), %rbp
+ movq RBX(%rsp), %rbx
+
+ /* skip=8 to skip flags saved in SS */
+ MCOUNT_RESTORE_FRAME 8
+
+ /* Restore flags */
+ popfq
+
+ jmp ftrace_return
+ftrace_restore_flags:
+ popfq
+ jmp ftrace_stub
+
+END(ftrace_regs_caller)
+
+
#else /* ! CONFIG_DYNAMIC_FTRACE */
-ENTRY(mcount)
+
+ENTRY(function_hook)
cmpl $0, function_trace_stop
jne ftrace_stub
@@ -120,8 +216,12 @@ GLOBAL(ftrace_stub)
trace:
MCOUNT_SAVE_FRAME
- movq 0x38(%rsp), %rdi
+ movq RIP(%rsp), %rdi
+#ifdef CC_USING_FENTRY
+ movq SS+16(%rsp), %rsi
+#else
movq 8(%rbp), %rsi
+#endif
subq $MCOUNT_INSN_SIZE, %rdi
call *ftrace_trace_function
@@ -129,20 +229,22 @@ trace:
MCOUNT_RESTORE_FRAME
jmp ftrace_stub
-END(mcount)
+END(function_hook)
#endif /* CONFIG_DYNAMIC_FTRACE */
#endif /* CONFIG_FUNCTION_TRACER */
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
ENTRY(ftrace_graph_caller)
- cmpl $0, function_trace_stop
- jne ftrace_stub
-
MCOUNT_SAVE_FRAME
+#ifdef CC_USING_FENTRY
+ leaq SS+16(%rsp), %rdi
+ movq $0, %rdx /* No framepointers needed */
+#else
leaq 8(%rbp), %rdi
- movq 0x38(%rsp), %rsi
movq (%rbp), %rdx
+#endif
+ movq RIP(%rsp), %rsi
subq $MCOUNT_INSN_SIZE, %rsi
call prepare_ftrace_return
diff --git a/arch/x86/kernel/ftrace.c b/arch/x86/kernel/ftrace.c
index c3a7cb4bf6e..1d414029f1d 100644
--- a/arch/x86/kernel/ftrace.c
+++ b/arch/x86/kernel/ftrace.c
@@ -206,6 +206,21 @@ static int
ftrace_modify_code(unsigned long ip, unsigned const char *old_code,
unsigned const char *new_code);
+/*
+ * Should never be called:
+ * It is only reached from __ftrace_replace_code(), which is invoked by
+ * ftrace_replace_code() (overridden on x86) and by ftrace_update_code().
+ * Those paths only turn mcount sites into nops or nops into calls; they
+ * never convert a call that ignores regs into one that saves them,
+ * which is exactly what ftrace_modify_call() is for.
+ */
+int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr,
+ unsigned long addr)
+{
+ WARN_ON(1);
+ return -EINVAL;
+}
+
int ftrace_update_ftrace_func(ftrace_func_t func)
{
unsigned long ip = (unsigned long)(&ftrace_call);
@@ -220,6 +235,14 @@ int ftrace_update_ftrace_func(ftrace_func_t func)
ret = ftrace_modify_code(ip, old, new);
+ /* Also update the regs callback function */
+ if (!ret) {
+ ip = (unsigned long)(&ftrace_regs_call);
+ memcpy(old, &ftrace_regs_call, MCOUNT_INSN_SIZE);
+ new = ftrace_call_replace(ip, (unsigned long)func);
+ ret = ftrace_modify_code(ip, old, new);
+ }
+
atomic_dec(&modifying_ftrace_code);
return ret;
@@ -299,6 +322,32 @@ static int add_brk_on_nop(struct dyn_ftrace *rec)
return add_break(rec->ip, old);
}
+/*
+ * If the record has the FTRACE_FL_REGS set, that means that it
+ * wants to convert to a callback that saves all regs. If FTRACE_FL_REGS
+ * is not set, then it wants to convert to the normal callback.
+ */
+static unsigned long get_ftrace_addr(struct dyn_ftrace *rec)
+{
+ if (rec->flags & FTRACE_FL_REGS)
+ return (unsigned long)FTRACE_REGS_ADDR;
+ else
+ return (unsigned long)FTRACE_ADDR;
+}
+
+/*
+ * The FTRACE_FL_REGS_EN is set when the record already points to
+ * a function that saves all the regs. Basically the '_EN' version
+ * represents the current state of the function.
+ */
+static unsigned long get_ftrace_old_addr(struct dyn_ftrace *rec)
+{
+ if (rec->flags & FTRACE_FL_REGS_EN)
+ return (unsigned long)FTRACE_REGS_ADDR;
+ else
+ return (unsigned long)FTRACE_ADDR;
+}
+
static int add_breakpoints(struct dyn_ftrace *rec, int enable)
{
unsigned long ftrace_addr;
@@ -306,7 +355,7 @@ static int add_breakpoints(struct dyn_ftrace *rec, int enable)
ret = ftrace_test_record(rec, enable);
- ftrace_addr = (unsigned long)FTRACE_ADDR;
+ ftrace_addr = get_ftrace_addr(rec);
switch (ret) {
case FTRACE_UPDATE_IGNORE:
@@ -316,6 +365,10 @@ static int add_breakpoints(struct dyn_ftrace *rec, int enable)
/* converting nop to call */
return add_brk_on_nop(rec);
+ case FTRACE_UPDATE_MODIFY_CALL_REGS:
+ case FTRACE_UPDATE_MODIFY_CALL:
+ ftrace_addr = get_ftrace_old_addr(rec);
+ /* fall through */
case FTRACE_UPDATE_MAKE_NOP:
/* converting a call to a nop */
return add_brk_on_call(rec, ftrace_addr);
@@ -360,13 +413,21 @@ static int remove_breakpoint(struct dyn_ftrace *rec)
* If not, don't touch the breakpoint, we may just create
* a disaster.
*/
- ftrace_addr = (unsigned long)FTRACE_ADDR;
+ ftrace_addr = get_ftrace_addr(rec);
+ nop = ftrace_call_replace(ip, ftrace_addr);
+
+ if (memcmp(&ins[1], &nop[1], MCOUNT_INSN_SIZE - 1) == 0)
+ goto update;
+
+ /* Check both ftrace_addr and ftrace_old_addr */
+ ftrace_addr = get_ftrace_old_addr(rec);
nop = ftrace_call_replace(ip, ftrace_addr);
if (memcmp(&ins[1], &nop[1], MCOUNT_INSN_SIZE - 1) != 0)
return -EINVAL;
}
+ update:
return probe_kernel_write((void *)ip, &nop[0], 1);
}
@@ -405,12 +466,14 @@ static int add_update(struct dyn_ftrace *rec, int enable)
ret = ftrace_test_record(rec, enable);
- ftrace_addr = (unsigned long)FTRACE_ADDR;
+ ftrace_addr = get_ftrace_addr(rec);
switch (ret) {
case FTRACE_UPDATE_IGNORE:
return 0;
+ case FTRACE_UPDATE_MODIFY_CALL_REGS:
+ case FTRACE_UPDATE_MODIFY_CALL:
case FTRACE_UPDATE_MAKE_CALL:
/* converting nop to call */
return add_update_call(rec, ftrace_addr);
@@ -455,12 +518,14 @@ static int finish_update(struct dyn_ftrace *rec, int enable)
ret = ftrace_update_record(rec, enable);
- ftrace_addr = (unsigned long)FTRACE_ADDR;
+ ftrace_addr = get_ftrace_addr(rec);
switch (ret) {
case FTRACE_UPDATE_IGNORE:
return 0;
+ case FTRACE_UPDATE_MODIFY_CALL_REGS:
+ case FTRACE_UPDATE_MODIFY_CALL:
case FTRACE_UPDATE_MAKE_CALL:
/* converting nop to call */
return finish_update_call(rec, ftrace_addr);
diff --git a/arch/x86/kernel/kprobes.c b/arch/x86/kernel/kprobes.c
index e2f751efb7b..57916c0d3cf 100644
--- a/arch/x86/kernel/kprobes.c
+++ b/arch/x86/kernel/kprobes.c
@@ -541,6 +541,23 @@ reenter_kprobe(struct kprobe *p, struct pt_regs *regs, struct kprobe_ctlblk *kcb
return 1;
}
+#ifdef KPROBES_CAN_USE_FTRACE
+static void __kprobes skip_singlestep(struct kprobe *p, struct pt_regs *regs,
+ struct kprobe_ctlblk *kcb)
+{
+ /*
+ * Emulate singlestep (and also recover regs->ip)
+ * as if there were a 5-byte nop
+ */
+ regs->ip = (unsigned long)p->addr + MCOUNT_INSN_SIZE;
+ if (unlikely(p->post_handler)) {
+ kcb->kprobe_status = KPROBE_HIT_SSDONE;
+ p->post_handler(p, regs, 0);
+ }
+ __this_cpu_write(current_kprobe, NULL);
+}
+#endif
+
/*
* Interrupts are disabled on entry as trap3 is an interrupt gate and they
* remain disabled throughout this function.
@@ -599,6 +616,12 @@ static int __kprobes kprobe_handler(struct pt_regs *regs)
} else if (kprobe_running()) {
p = __this_cpu_read(current_kprobe);
if (p->break_handler && p->break_handler(p, regs)) {
+#ifdef KPROBES_CAN_USE_FTRACE
+ if (kprobe_ftrace(p)) {
+ skip_singlestep(p, regs, kcb);
+ return 1;
+ }
+#endif
setup_singlestep(p, regs, kcb, 0);
return 1;
}
@@ -1052,6 +1075,50 @@ int __kprobes longjmp_break_handler(struct kprobe *p, struct pt_regs *regs)
return 0;
}
+#ifdef KPROBES_CAN_USE_FTRACE
+/* Ftrace callback handler for kprobes */
+void __kprobes kprobe_ftrace_handler(unsigned long ip, unsigned long parent_ip,
+ struct ftrace_ops *ops, struct pt_regs *regs)
+{
+ struct kprobe *p;
+ struct kprobe_ctlblk *kcb;
+ unsigned long flags;
+
+ /* Disable irq for emulating a breakpoint and avoiding preempt */
+ local_irq_save(flags);
+
+ p = get_kprobe((kprobe_opcode_t *)ip);
+ if (unlikely(!p) || kprobe_disabled(p))
+ goto end;
+
+ kcb = get_kprobe_ctlblk();
+ if (kprobe_running()) {
+ kprobes_inc_nmissed_count(p);
+ } else {
+ /* Kprobe handler expects regs->ip = ip + 1 as breakpoint hit */
+ regs->ip = ip + sizeof(kprobe_opcode_t);
+
+ __this_cpu_write(current_kprobe, p);
+ kcb->kprobe_status = KPROBE_HIT_ACTIVE;
+ if (!p->pre_handler || !p->pre_handler(p, regs))
+ skip_singlestep(p, regs, kcb);
+ /*
+ * If pre_handler returns !0, it sets regs->ip and
+ * resets current kprobe.
+ */
+ }
+end:
+ local_irq_restore(flags);
+}
+
+int __kprobes arch_prepare_kprobe_ftrace(struct kprobe *p)
+{
+ p->ainsn.insn = NULL;
+ p->ainsn.boostable = -1;
+ return 0;
+}
+#endif
+
int __init arch_init_kprobes(void)
{
return arch_init_optprobes();
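With ARCH_SUPPORTS_KPROBES_ON_FTRACE, a kprobe whose address is a function's mcount/fentry site is dispatched through kprobe_ftrace_handler() above instead of an int3 trap, and registration looks the same as always. A minimal sketch (the probe target is an arbitrary example):

    #include <linux/module.h>
    #include <linux/kprobes.h>

    static int handler_pre(struct kprobe *p, struct pt_regs *regs)
    {
            pr_info("entry hit: ip=%lx\n", regs->ip);
            return 0;       /* 0: let skip_singlestep() finish up */
    }

    static struct kprobe kp = {
            .symbol_name = "do_fork",       /* arbitrary example target */
            .pre_handler = handler_pre,
    };

    static int __init kpf_init(void)
    {
            return register_kprobe(&kp);
    }

    static void __exit kpf_exit(void)
    {
            unregister_kprobe(&kp);
    }

    module_init(kpf_init);
    module_exit(kpf_exit);
    MODULE_LICENSE("GPL");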
diff --git a/arch/x86/kernel/perf_regs.c b/arch/x86/kernel/perf_regs.c
new file mode 100644
index 00000000000..e309cc5c276
--- /dev/null
+++ b/arch/x86/kernel/perf_regs.c
@@ -0,0 +1,105 @@
+#include <linux/errno.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/perf_event.h>
+#include <linux/bug.h>
+#include <linux/stddef.h>
+#include <asm/perf_regs.h>
+#include <asm/ptrace.h>
+
+#ifdef CONFIG_X86_32
+#define PERF_REG_X86_MAX PERF_REG_X86_32_MAX
+#else
+#define PERF_REG_X86_MAX PERF_REG_X86_64_MAX
+#endif
+
+#define PT_REGS_OFFSET(id, r) [id] = offsetof(struct pt_regs, r)
+
+static unsigned int pt_regs_offset[PERF_REG_X86_MAX] = {
+ PT_REGS_OFFSET(PERF_REG_X86_AX, ax),
+ PT_REGS_OFFSET(PERF_REG_X86_BX, bx),
+ PT_REGS_OFFSET(PERF_REG_X86_CX, cx),
+ PT_REGS_OFFSET(PERF_REG_X86_DX, dx),
+ PT_REGS_OFFSET(PERF_REG_X86_SI, si),
+ PT_REGS_OFFSET(PERF_REG_X86_DI, di),
+ PT_REGS_OFFSET(PERF_REG_X86_BP, bp),
+ PT_REGS_OFFSET(PERF_REG_X86_SP, sp),
+ PT_REGS_OFFSET(PERF_REG_X86_IP, ip),
+ PT_REGS_OFFSET(PERF_REG_X86_FLAGS, flags),
+ PT_REGS_OFFSET(PERF_REG_X86_CS, cs),
+ PT_REGS_OFFSET(PERF_REG_X86_SS, ss),
+#ifdef CONFIG_X86_32
+ PT_REGS_OFFSET(PERF_REG_X86_DS, ds),
+ PT_REGS_OFFSET(PERF_REG_X86_ES, es),
+ PT_REGS_OFFSET(PERF_REG_X86_FS, fs),
+ PT_REGS_OFFSET(PERF_REG_X86_GS, gs),
+#else
+ /*
+ * The pt_regs struct does not store
+ * ds, es, fs, gs in 64 bit mode.
+ */
+ (unsigned int) -1,
+ (unsigned int) -1,
+ (unsigned int) -1,
+ (unsigned int) -1,
+#endif
+#ifdef CONFIG_X86_64
+ PT_REGS_OFFSET(PERF_REG_X86_R8, r8),
+ PT_REGS_OFFSET(PERF_REG_X86_R9, r9),
+ PT_REGS_OFFSET(PERF_REG_X86_R10, r10),
+ PT_REGS_OFFSET(PERF_REG_X86_R11, r11),
+ PT_REGS_OFFSET(PERF_REG_X86_R12, r12),
+ PT_REGS_OFFSET(PERF_REG_X86_R13, r13),
+ PT_REGS_OFFSET(PERF_REG_X86_R14, r14),
+ PT_REGS_OFFSET(PERF_REG_X86_R15, r15),
+#endif
+};
+
+u64 perf_reg_value(struct pt_regs *regs, int idx)
+{
+ if (WARN_ON_ONCE(idx >= ARRAY_SIZE(pt_regs_offset)))
+ return 0;
+
+ return regs_get_register(regs, pt_regs_offset[idx]);
+}
+
+#define REG_RESERVED (~((1ULL << PERF_REG_X86_MAX) - 1ULL))
+
+#ifdef CONFIG_X86_32
+int perf_reg_validate(u64 mask)
+{
+ if (!mask || mask & REG_RESERVED)
+ return -EINVAL;
+
+ return 0;
+}
+
+u64 perf_reg_abi(struct task_struct *task)
+{
+ return PERF_SAMPLE_REGS_ABI_32;
+}
+#else /* CONFIG_X86_64 */
+#define REG_NOSUPPORT ((1ULL << PERF_REG_X86_DS) | \
+ (1ULL << PERF_REG_X86_ES) | \
+ (1ULL << PERF_REG_X86_FS) | \
+ (1ULL << PERF_REG_X86_GS))
+
+int perf_reg_validate(u64 mask)
+{
+ if (!mask || mask & REG_RESERVED)
+ return -EINVAL;
+
+ if (mask & REG_NOSUPPORT)
+ return -EINVAL;
+
+ return 0;
+}
+
+u64 perf_reg_abi(struct task_struct *task)
+{
+ if (test_tsk_thread_flag(task, TIF_IA32))
+ return PERF_SAMPLE_REGS_ABI_32;
+ else
+ return PERF_SAMPLE_REGS_ABI_64;
+}
+#endif /* CONFIG_X86_32 */
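From userspace, the masks validated by perf_reg_validate() arrive through perf_event_open(). A sketch of requesting register samples, assuming the PERF_SAMPLE_REGS_USER sample type and sample_regs_user attribute field added elsewhere in this series:

    #include <string.h>
    #include <unistd.h>
    #include <sys/syscall.h>
    #include <linux/perf_event.h>
    #include <asm/perf_regs.h>

    /* Open a cycles counter on the current task, sampling ax/ip/sp. */
    static int open_counter_with_regs(void)
    {
            struct perf_event_attr attr;

            memset(&attr, 0, sizeof(attr));
            attr.size = sizeof(attr);
            attr.type = PERF_TYPE_HARDWARE;
            attr.config = PERF_COUNT_HW_CPU_CYCLES;
            attr.sample_period = 100000;
            attr.sample_type = PERF_SAMPLE_IP | PERF_SAMPLE_REGS_USER;
            attr.sample_regs_user = (1ULL << PERF_REG_X86_AX) |
                                    (1ULL << PERF_REG_X86_IP) |
                                    (1ULL << PERF_REG_X86_SP);

            /* pid=0, cpu=-1: this task, any CPU */
            return syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
    }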
diff --git a/arch/x86/kernel/step.c b/arch/x86/kernel/step.c
index c346d116148..cd3b2438a98 100644
--- a/arch/x86/kernel/step.c
+++ b/arch/x86/kernel/step.c
@@ -157,6 +157,33 @@ static int enable_single_step(struct task_struct *child)
return 1;
}
+void set_task_blockstep(struct task_struct *task, bool on)
+{
+ unsigned long debugctl;
+
+ /*
+ * Ensure irq/preemption can't change debugctl in between.
+ * Note also that both TIF_BLOCKSTEP and debugctl should
+ * be changed atomically wrt preemption.
+ * FIXME: this means that set/clear TIF_BLOCKSTEP is simply
+	 * wrong if task != current, SIGKILL can wake up the stopped
+ * tracee and set/clear can play with the running task, this
+ * can confuse the next __switch_to_xtra().
+ */
+ local_irq_disable();
+ debugctl = get_debugctlmsr();
+ if (on) {
+ debugctl |= DEBUGCTLMSR_BTF;
+ set_tsk_thread_flag(task, TIF_BLOCKSTEP);
+ } else {
+ debugctl &= ~DEBUGCTLMSR_BTF;
+ clear_tsk_thread_flag(task, TIF_BLOCKSTEP);
+ }
+ if (task == current)
+ update_debugctlmsr(debugctl);
+ local_irq_enable();
+}
+
/*
* Enable single or block step.
*/
@@ -169,19 +196,10 @@ static void enable_step(struct task_struct *child, bool block)
* So no one should try to use debugger block stepping in a program
* that uses user-mode single stepping itself.
*/
- if (enable_single_step(child) && block) {
- unsigned long debugctl = get_debugctlmsr();
-
- debugctl |= DEBUGCTLMSR_BTF;
- update_debugctlmsr(debugctl);
- set_tsk_thread_flag(child, TIF_BLOCKSTEP);
- } else if (test_tsk_thread_flag(child, TIF_BLOCKSTEP)) {
- unsigned long debugctl = get_debugctlmsr();
-
- debugctl &= ~DEBUGCTLMSR_BTF;
- update_debugctlmsr(debugctl);
- clear_tsk_thread_flag(child, TIF_BLOCKSTEP);
- }
+ if (enable_single_step(child) && block)
+ set_task_blockstep(child, true);
+ else if (test_tsk_thread_flag(child, TIF_BLOCKSTEP))
+ set_task_blockstep(child, false);
}
void user_enable_single_step(struct task_struct *child)
@@ -199,13 +217,8 @@ void user_disable_single_step(struct task_struct *child)
/*
* Make sure block stepping (BTF) is disabled.
*/
- if (test_tsk_thread_flag(child, TIF_BLOCKSTEP)) {
- unsigned long debugctl = get_debugctlmsr();
-
- debugctl &= ~DEBUGCTLMSR_BTF;
- update_debugctlmsr(debugctl);
- clear_tsk_thread_flag(child, TIF_BLOCKSTEP);
- }
+ if (test_tsk_thread_flag(child, TIF_BLOCKSTEP))
+ set_task_blockstep(child, false);
/* Always clear TIF_SINGLESTEP... */
clear_tsk_thread_flag(child, TIF_SINGLESTEP);
diff --git a/arch/x86/kernel/uprobes.c b/arch/x86/kernel/uprobes.c
index 36fd42091fa..9538f00827a 100644
--- a/arch/x86/kernel/uprobes.c
+++ b/arch/x86/kernel/uprobes.c
@@ -41,6 +41,9 @@
/* Adjust the return address of a call insn */
#define UPROBE_FIX_CALL 0x2
+/* Instruction will modify TF, don't change it */
+#define UPROBE_FIX_SETF 0x4
+
#define UPROBE_FIX_RIP_AX 0x8000
#define UPROBE_FIX_RIP_CX 0x4000
@@ -239,6 +242,10 @@ static void prepare_fixups(struct arch_uprobe *auprobe, struct insn *insn)
insn_get_opcode(insn); /* should be a nop */
switch (OPCODE1(insn)) {
+ case 0x9d:
+ /* popf */
+ auprobe->fixups |= UPROBE_FIX_SETF;
+ break;
case 0xc3: /* ret/lret */
case 0xcb:
case 0xc2:
@@ -646,7 +653,7 @@ void arch_uprobe_abort_xol(struct arch_uprobe *auprobe, struct pt_regs *regs)
* Skip these instructions as per the currently known x86 ISA.
* 0x66* { 0x90 | 0x0f 0x1f | 0x0f 0x19 | 0x87 0xc0 }
*/
-bool arch_uprobe_skip_sstep(struct arch_uprobe *auprobe, struct pt_regs *regs)
+static bool __skip_sstep(struct arch_uprobe *auprobe, struct pt_regs *regs)
{
int i;
@@ -673,3 +680,46 @@ bool arch_uprobe_skip_sstep(struct arch_uprobe *auprobe, struct pt_regs *regs)
}
return false;
}
+
+bool arch_uprobe_skip_sstep(struct arch_uprobe *auprobe, struct pt_regs *regs)
+{
+ bool ret = __skip_sstep(auprobe, regs);
+ if (ret && (regs->flags & X86_EFLAGS_TF))
+ send_sig(SIGTRAP, current, 0);
+ return ret;
+}
+
+void arch_uprobe_enable_step(struct arch_uprobe *auprobe)
+{
+ struct task_struct *task = current;
+ struct arch_uprobe_task *autask = &task->utask->autask;
+ struct pt_regs *regs = task_pt_regs(task);
+
+ autask->saved_tf = !!(regs->flags & X86_EFLAGS_TF);
+
+ regs->flags |= X86_EFLAGS_TF;
+ if (test_tsk_thread_flag(task, TIF_BLOCKSTEP))
+ set_task_blockstep(task, false);
+}
+
+void arch_uprobe_disable_step(struct arch_uprobe *auprobe)
+{
+ struct task_struct *task = current;
+ struct arch_uprobe_task *autask = &task->utask->autask;
+ bool trapped = (task->utask->state == UTASK_SSTEP_TRAPPED);
+ struct pt_regs *regs = task_pt_regs(task);
+ /*
+ * The state of TIF_BLOCKSTEP was not saved so we can get an extra
+ * SIGTRAP if we do not clear TF. We need to examine the opcode to
+ * make it right.
+ */
+ if (unlikely(trapped)) {
+ if (!autask->saved_tf)
+ regs->flags &= ~X86_EFLAGS_TF;
+ } else {
+ if (autask->saved_tf)
+ send_sig(SIGTRAP, task, 0);
+ else if (!(auprobe->fixups & UPROBE_FIX_SETF))
+ regs->flags &= ~X86_EFLAGS_TF;
+ }
+}
diff --git a/arch/x86/kernel/x8664_ksyms_64.c b/arch/x86/kernel/x8664_ksyms_64.c
index 6020f6f5927..1330dd10295 100644
--- a/arch/x86/kernel/x8664_ksyms_64.c
+++ b/arch/x86/kernel/x8664_ksyms_64.c
@@ -13,9 +13,13 @@
#include <asm/ftrace.h>
#ifdef CONFIG_FUNCTION_TRACER
-/* mcount is defined in assembly */
+/* mcount and __fentry__ are defined in assembly */
+#ifdef CC_USING_FENTRY
+EXPORT_SYMBOL(__fentry__);
+#else
EXPORT_SYMBOL(mcount);
#endif
+#endif
EXPORT_SYMBOL(__get_user_1);
EXPORT_SYMBOL(__get_user_2);
diff --git a/arch/x86/kvm/trace.h b/arch/x86/kvm/trace.h
index a71faf727ff..bca63f04dcc 100644
--- a/arch/x86/kvm/trace.h
+++ b/arch/x86/kvm/trace.h
@@ -183,95 +183,6 @@ TRACE_EVENT(kvm_apic,
#define KVM_ISA_VMX 1
#define KVM_ISA_SVM 2
-#define VMX_EXIT_REASONS \
- { EXIT_REASON_EXCEPTION_NMI, "EXCEPTION_NMI" }, \
- { EXIT_REASON_EXTERNAL_INTERRUPT, "EXTERNAL_INTERRUPT" }, \
- { EXIT_REASON_TRIPLE_FAULT, "TRIPLE_FAULT" }, \
- { EXIT_REASON_PENDING_INTERRUPT, "PENDING_INTERRUPT" }, \
- { EXIT_REASON_NMI_WINDOW, "NMI_WINDOW" }, \
- { EXIT_REASON_TASK_SWITCH, "TASK_SWITCH" }, \
- { EXIT_REASON_CPUID, "CPUID" }, \
- { EXIT_REASON_HLT, "HLT" }, \
- { EXIT_REASON_INVLPG, "INVLPG" }, \
- { EXIT_REASON_RDPMC, "RDPMC" }, \
- { EXIT_REASON_RDTSC, "RDTSC" }, \
- { EXIT_REASON_VMCALL, "VMCALL" }, \
- { EXIT_REASON_VMCLEAR, "VMCLEAR" }, \
- { EXIT_REASON_VMLAUNCH, "VMLAUNCH" }, \
- { EXIT_REASON_VMPTRLD, "VMPTRLD" }, \
- { EXIT_REASON_VMPTRST, "VMPTRST" }, \
- { EXIT_REASON_VMREAD, "VMREAD" }, \
- { EXIT_REASON_VMRESUME, "VMRESUME" }, \
- { EXIT_REASON_VMWRITE, "VMWRITE" }, \
- { EXIT_REASON_VMOFF, "VMOFF" }, \
- { EXIT_REASON_VMON, "VMON" }, \
- { EXIT_REASON_CR_ACCESS, "CR_ACCESS" }, \
- { EXIT_REASON_DR_ACCESS, "DR_ACCESS" }, \
- { EXIT_REASON_IO_INSTRUCTION, "IO_INSTRUCTION" }, \
- { EXIT_REASON_MSR_READ, "MSR_READ" }, \
- { EXIT_REASON_MSR_WRITE, "MSR_WRITE" }, \
- { EXIT_REASON_MWAIT_INSTRUCTION, "MWAIT_INSTRUCTION" }, \
- { EXIT_REASON_MONITOR_INSTRUCTION, "MONITOR_INSTRUCTION" }, \
- { EXIT_REASON_PAUSE_INSTRUCTION, "PAUSE_INSTRUCTION" }, \
- { EXIT_REASON_MCE_DURING_VMENTRY, "MCE_DURING_VMENTRY" }, \
- { EXIT_REASON_TPR_BELOW_THRESHOLD, "TPR_BELOW_THRESHOLD" }, \
- { EXIT_REASON_APIC_ACCESS, "APIC_ACCESS" }, \
- { EXIT_REASON_EPT_VIOLATION, "EPT_VIOLATION" }, \
- { EXIT_REASON_EPT_MISCONFIG, "EPT_MISCONFIG" }, \
- { EXIT_REASON_WBINVD, "WBINVD" }
-
-#define SVM_EXIT_REASONS \
- { SVM_EXIT_READ_CR0, "read_cr0" }, \
- { SVM_EXIT_READ_CR3, "read_cr3" }, \
- { SVM_EXIT_READ_CR4, "read_cr4" }, \
- { SVM_EXIT_READ_CR8, "read_cr8" }, \
- { SVM_EXIT_WRITE_CR0, "write_cr0" }, \
- { SVM_EXIT_WRITE_CR3, "write_cr3" }, \
- { SVM_EXIT_WRITE_CR4, "write_cr4" }, \
- { SVM_EXIT_WRITE_CR8, "write_cr8" }, \
- { SVM_EXIT_READ_DR0, "read_dr0" }, \
- { SVM_EXIT_READ_DR1, "read_dr1" }, \
- { SVM_EXIT_READ_DR2, "read_dr2" }, \
- { SVM_EXIT_READ_DR3, "read_dr3" }, \
- { SVM_EXIT_WRITE_DR0, "write_dr0" }, \
- { SVM_EXIT_WRITE_DR1, "write_dr1" }, \
- { SVM_EXIT_WRITE_DR2, "write_dr2" }, \
- { SVM_EXIT_WRITE_DR3, "write_dr3" }, \
- { SVM_EXIT_WRITE_DR5, "write_dr5" }, \
- { SVM_EXIT_WRITE_DR7, "write_dr7" }, \
- { SVM_EXIT_EXCP_BASE + DB_VECTOR, "DB excp" }, \
- { SVM_EXIT_EXCP_BASE + BP_VECTOR, "BP excp" }, \
- { SVM_EXIT_EXCP_BASE + UD_VECTOR, "UD excp" }, \
- { SVM_EXIT_EXCP_BASE + PF_VECTOR, "PF excp" }, \
- { SVM_EXIT_EXCP_BASE + NM_VECTOR, "NM excp" }, \
- { SVM_EXIT_EXCP_BASE + MC_VECTOR, "MC excp" }, \
- { SVM_EXIT_INTR, "interrupt" }, \
- { SVM_EXIT_NMI, "nmi" }, \
- { SVM_EXIT_SMI, "smi" }, \
- { SVM_EXIT_INIT, "init" }, \
- { SVM_EXIT_VINTR, "vintr" }, \
- { SVM_EXIT_CPUID, "cpuid" }, \
- { SVM_EXIT_INVD, "invd" }, \
- { SVM_EXIT_HLT, "hlt" }, \
- { SVM_EXIT_INVLPG, "invlpg" }, \
- { SVM_EXIT_INVLPGA, "invlpga" }, \
- { SVM_EXIT_IOIO, "io" }, \
- { SVM_EXIT_MSR, "msr" }, \
- { SVM_EXIT_TASK_SWITCH, "task_switch" }, \
- { SVM_EXIT_SHUTDOWN, "shutdown" }, \
- { SVM_EXIT_VMRUN, "vmrun" }, \
- { SVM_EXIT_VMMCALL, "hypercall" }, \
- { SVM_EXIT_VMLOAD, "vmload" }, \
- { SVM_EXIT_VMSAVE, "vmsave" }, \
- { SVM_EXIT_STGI, "stgi" }, \
- { SVM_EXIT_CLGI, "clgi" }, \
- { SVM_EXIT_SKINIT, "skinit" }, \
- { SVM_EXIT_WBINVD, "wbinvd" }, \
- { SVM_EXIT_MONITOR, "monitor" }, \
- { SVM_EXIT_MWAIT, "mwait" }, \
- { SVM_EXIT_XSETBV, "xsetbv" }, \
- { SVM_EXIT_NPF, "npf" }
-
/*
* Tracepoint for kvm guest exit:
*/
diff --git a/drivers/oprofile/cpu_buffer.c b/drivers/oprofile/cpu_buffer.c
index b8ef8ddcc29..8aa73fac6ad 100644
--- a/drivers/oprofile/cpu_buffer.c
+++ b/drivers/oprofile/cpu_buffer.c
@@ -451,14 +451,9 @@ static void wq_sync_buffer(struct work_struct *work)
{
struct oprofile_cpu_buffer *b =
container_of(work, struct oprofile_cpu_buffer, work.work);
- if (b->cpu != smp_processor_id()) {
- printk(KERN_DEBUG "WQ on CPU%d, prefer CPU%d\n",
- smp_processor_id(), b->cpu);
-
- if (!cpu_online(b->cpu)) {
- cancel_delayed_work(&b->work);
- return;
- }
+ if (b->cpu != smp_processor_id() && !cpu_online(b->cpu)) {
+ cancel_delayed_work(&b->work);
+ return;
}
sync_buffer(b->cpu);
diff --git a/include/linux/ftrace.h b/include/linux/ftrace.h
index 55e6d63d46d..a52f2f4fe03 100644
--- a/include/linux/ftrace.h
+++ b/include/linux/ftrace.h
@@ -10,6 +10,7 @@
#include <linux/kallsyms.h>
#include <linux/linkage.h>
#include <linux/bitops.h>
+#include <linux/ptrace.h>
#include <linux/ktime.h>
#include <linux/sched.h>
#include <linux/types.h>
@@ -18,6 +19,28 @@
#include <asm/ftrace.h>
+/*
+ * If the arch supports passing the variable contents of
+ * function_trace_op as the third parameter back from the
+ * mcount call, then the arch should define this as 1.
+ */
+#ifndef ARCH_SUPPORTS_FTRACE_OPS
+#define ARCH_SUPPORTS_FTRACE_OPS 0
+#endif
+
+/*
+ * If the arch's mcount caller does not support all of ftrace's
+ * features, then it must call an indirect function that
+ * does, or at least does enough to prevent any unwelcome side effects.
+ */
+#if !defined(CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST) || \
+ !ARCH_SUPPORTS_FTRACE_OPS
+# define FTRACE_FORCE_LIST_FUNC 1
+#else
+# define FTRACE_FORCE_LIST_FUNC 0
+#endif
+
+
struct module;
struct ftrace_hash;
@@ -29,7 +52,10 @@ ftrace_enable_sysctl(struct ctl_table *table, int write,
void __user *buffer, size_t *lenp,
loff_t *ppos);
-typedef void (*ftrace_func_t)(unsigned long ip, unsigned long parent_ip);
+struct ftrace_ops;
+
+typedef void (*ftrace_func_t)(unsigned long ip, unsigned long parent_ip,
+ struct ftrace_ops *op, struct pt_regs *regs);
/*
* FTRACE_OPS_FL_* bits denote the state of ftrace_ops struct and are
@@ -45,12 +71,33 @@ typedef void (*ftrace_func_t)(unsigned long ip, unsigned long parent_ip);
 * could be controlled by the following calls:
* ftrace_function_local_enable
* ftrace_function_local_disable
+ * SAVE_REGS - The ftrace_ops wants regs saved at each function called
+ * and passed to the callback. If this flag is set, but the
+ * architecture does not support passing regs
+ * (ARCH_SUPPORTS_FTRACE_SAVE_REGS is not defined), then the
+ * ftrace_ops will fail to register, unless the next flag
+ * is set.
+ * SAVE_REGS_IF_SUPPORTED - This is the same as SAVE_REGS, but if the
+ * handler can handle an arch that does not save regs
+ * (the handler tests if regs == NULL), then it can set
+ * this flag instead. It will not fail registering the ftrace_ops
+ * but the regs field will be NULL if the arch does not support
+ * passing regs to the handler.
+ * Note, if this flag is set, the SAVE_REGS flag will automatically
+ * get set upon registering the ftrace_ops, if the arch supports it.
+ * RECURSION_SAFE - The ftrace_ops can set this to tell the ftrace infrastructure
+ * that the callback has its own recursion protection. If it does
+ * not set this, then the ftrace infrastructure will add recursion
+ * protection for the caller.
*/
enum {
- FTRACE_OPS_FL_ENABLED = 1 << 0,
- FTRACE_OPS_FL_GLOBAL = 1 << 1,
- FTRACE_OPS_FL_DYNAMIC = 1 << 2,
- FTRACE_OPS_FL_CONTROL = 1 << 3,
+ FTRACE_OPS_FL_ENABLED = 1 << 0,
+ FTRACE_OPS_FL_GLOBAL = 1 << 1,
+ FTRACE_OPS_FL_DYNAMIC = 1 << 2,
+ FTRACE_OPS_FL_CONTROL = 1 << 3,
+ FTRACE_OPS_FL_SAVE_REGS = 1 << 4,
+ FTRACE_OPS_FL_SAVE_REGS_IF_SUPPORTED = 1 << 5,
+ FTRACE_OPS_FL_RECURSION_SAFE = 1 << 6,
};
struct ftrace_ops {
@@ -163,7 +210,8 @@ static inline int ftrace_function_local_disabled(struct ftrace_ops *ops)
return *this_cpu_ptr(ops->disabled);
}
-extern void ftrace_stub(unsigned long a0, unsigned long a1);
+extern void ftrace_stub(unsigned long a0, unsigned long a1,
+ struct ftrace_ops *op, struct pt_regs *regs);
#else /* !CONFIG_FUNCTION_TRACER */
/*
@@ -172,6 +220,10 @@ extern void ftrace_stub(unsigned long a0, unsigned long a1);
*/
#define register_ftrace_function(ops) ({ 0; })
#define unregister_ftrace_function(ops) ({ 0; })
+static inline int ftrace_nr_registered_ops(void)
+{
+ return 0;
+}
static inline void clear_ftrace_function(void) { }
static inline void ftrace_kill(void) { }
static inline void ftrace_stop(void) { }
@@ -227,12 +279,33 @@ extern void unregister_ftrace_function_probe_all(char *glob);
extern int ftrace_text_reserved(void *start, void *end);
+extern int ftrace_nr_registered_ops(void);
+
+/*
+ * The dyn_ftrace record's flags field is split into two parts.
+ * The first part, '0-FTRACE_REF_MAX', is a counter of
+ * the number of callbacks that have registered the function that
+ * the dyn_ftrace descriptor represents.
+ *
+ * The second part is a mask:
+ * ENABLED - the function is being traced
+ * REGS - the record wants the function to save regs
+ * REGS_EN - the function is set up to save regs.
+ *
+ * When a new ftrace_ops is registered and wants a function to save
+ * pt_regs, the rec->flag REGS is set. When the function has been
+ * set up to save regs, the REGS_EN flag is set. Once a function
+ * starts saving regs it will do so until all ftrace_ops are removed
+ * from tracing that function.
+ */
enum {
- FTRACE_FL_ENABLED = (1 << 30),
+ FTRACE_FL_ENABLED = (1UL << 29),
+ FTRACE_FL_REGS = (1UL << 30),
+ FTRACE_FL_REGS_EN = (1UL << 31)
};
-#define FTRACE_FL_MASK (0x3UL << 30)
-#define FTRACE_REF_MAX ((1 << 30) - 1)
+#define FTRACE_FL_MASK (0x7UL << 29)
+#define FTRACE_REF_MAX ((1UL << 29) - 1)
struct dyn_ftrace {
union {
@@ -244,6 +317,8 @@ struct dyn_ftrace {
};
int ftrace_force_update(void);
+int ftrace_set_filter_ip(struct ftrace_ops *ops, unsigned long ip,
+ int remove, int reset);
int ftrace_set_filter(struct ftrace_ops *ops, unsigned char *buf,
int len, int reset);
int ftrace_set_notrace(struct ftrace_ops *ops, unsigned char *buf,
@@ -263,9 +338,23 @@ enum {
FTRACE_STOP_FUNC_RET = (1 << 4),
};
+/*
+ * The FTRACE_UPDATE_* enum is used to pass information back
+ * from the ftrace_update_record() and ftrace_test_record()
+ * functions. These are called by the code update routines
+ * to find out what is to be done for a given function.
+ *
+ * IGNORE - The function is already what we want it to be
+ * MAKE_CALL - Start tracing the function
+ * MODIFY_CALL - Stop saving regs for the function
+ * MODIFY_CALL_REGS - Start saving regs for the function
+ * MAKE_NOP - Stop tracing the function
+ */
enum {
FTRACE_UPDATE_IGNORE,
FTRACE_UPDATE_MAKE_CALL,
+ FTRACE_UPDATE_MODIFY_CALL,
+ FTRACE_UPDATE_MODIFY_CALL_REGS,
FTRACE_UPDATE_MAKE_NOP,
};
@@ -317,7 +406,9 @@ extern int ftrace_dyn_arch_init(void *data);
extern void ftrace_replace_code(int enable);
extern int ftrace_update_ftrace_func(ftrace_func_t func);
extern void ftrace_caller(void);
+extern void ftrace_regs_caller(void);
extern void ftrace_call(void);
+extern void ftrace_regs_call(void);
extern void mcount_call(void);
void ftrace_modify_all_code(int command);
@@ -325,6 +416,15 @@ void ftrace_modify_all_code(int command);
#ifndef FTRACE_ADDR
#define FTRACE_ADDR ((unsigned long)ftrace_caller)
#endif
+
+#ifndef FTRACE_REGS_ADDR
+#ifdef ARCH_SUPPORTS_FTRACE_SAVE_REGS
+# define FTRACE_REGS_ADDR ((unsigned long)ftrace_regs_caller)
+#else
+# define FTRACE_REGS_ADDR FTRACE_ADDR
+#endif
+#endif
+
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
extern void ftrace_graph_caller(void);
extern int ftrace_enable_ftrace_graph_caller(void);
@@ -380,6 +480,39 @@ extern int ftrace_make_nop(struct module *mod,
*/
extern int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr);
+#ifdef ARCH_SUPPORTS_FTRACE_SAVE_REGS
+/**
+ * ftrace_modify_call - convert from one addr to another (no nop)
+ * @rec: the mcount call site record
+ * @old_addr: the address the call site is expected to currently call
+ * @addr: the address to change to
+ *
+ * This is a very sensitive operation and great care needs
+ * to be taken by the arch. The operation should carefully
+ * read the location, check to see if what is read is indeed
+ * what we expect it to be, and then on success of the compare,
+ * it should write to the location.
+ *
+ * The code segment at @rec->ip should be a call to @old_addr
+ *
+ * Return must be:
+ * 0 on success
+ * -EFAULT on error reading the location
+ * -EINVAL on a failed compare of the contents
+ * -EPERM on error writing to the location
+ * Any other value will be considered a failure.
+ */
+extern int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr,
+ unsigned long addr);
+#else
+/* Should never be called */
+static inline int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr,
+ unsigned long addr)
+{
+ return -EINVAL;
+}
+#endif
+
/* May be defined in arch */
extern int ftrace_arch_read_dyn_info(char *buf, int size);
@@ -387,7 +520,7 @@ extern int skip_trace(unsigned long ip);
extern void ftrace_disable_daemon(void);
extern void ftrace_enable_daemon(void);
-#else
+#else /* CONFIG_DYNAMIC_FTRACE */
static inline int skip_trace(unsigned long ip) { return 0; }
static inline int ftrace_force_update(void) { return 0; }
static inline void ftrace_disable_daemon(void) { }
@@ -405,6 +538,10 @@ static inline int ftrace_text_reserved(void *start, void *end)
{
return 0;
}
+static inline unsigned long ftrace_location(unsigned long ip)
+{
+ return 0;
+}
/*
* Again users of functions that have ftrace_ops may not
@@ -413,6 +550,7 @@ static inline int ftrace_text_reserved(void *start, void *end)
*/
#define ftrace_regex_open(ops, flag, inod, file) ({ -ENODEV; })
#define ftrace_set_early_filter(ops, buf, enable) do { } while (0)
+#define ftrace_set_filter_ip(ops, ip, remove, reset) ({ -ENODEV; })
#define ftrace_set_filter(ops, buf, len, reset) ({ -ENODEV; })
#define ftrace_set_notrace(ops, buf, len, reset) ({ -ENODEV; })
#define ftrace_free_filter(ops) do { } while (0)
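
For illustration, a minimal sketch of a client of the new four-argument callback ABI declared above; the module-style code, target symbol "do_fork" and pr_debug() output are hypothetical and not part of this patch. With SAVE_REGS_IF_SUPPORTED, @regs may be NULL on an arch without ARCH_SUPPORTS_FTRACE_SAVE_REGS, so the handler must check it:

#include <linux/ftrace.h>
#include <linux/kallsyms.h>
#include <linux/kernel.h>

static void sample_trace_func(unsigned long ip, unsigned long parent_ip,
			      struct ftrace_ops *op, struct pt_regs *regs)
{
	/* regs is NULL when the arch cannot pass registers */
	if (regs)
		pr_debug("traced %pS from %pS\n", (void *)ip, (void *)parent_ip);
}

static struct ftrace_ops sample_ops = {
	.func	= sample_trace_func,
	.flags	= FTRACE_OPS_FL_SAVE_REGS_IF_SUPPORTED |
		  FTRACE_OPS_FL_RECURSION_SAFE,
};

static int sample_trace_init(void)
{
	/* Filter on a single location: remove=0, reset=0 */
	int ret = ftrace_set_filter_ip(&sample_ops,
				       kallsyms_lookup_name("do_fork"), 0, 0);

	return ret ? ret : register_ftrace_function(&sample_ops);
}
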
diff --git a/include/linux/kprobes.h b/include/linux/kprobes.h
index b6e1f8c0057..23755ba42ab 100644
--- a/include/linux/kprobes.h
+++ b/include/linux/kprobes.h
@@ -38,6 +38,7 @@
#include <linux/spinlock.h>
#include <linux/rcupdate.h>
#include <linux/mutex.h>
+#include <linux/ftrace.h>
#ifdef CONFIG_KPROBES
#include <asm/kprobes.h>
@@ -48,14 +49,26 @@
#define KPROBE_REENTER 0x00000004
#define KPROBE_HIT_SSDONE 0x00000008
+/*
+ * If function tracer is enabled and the arch supports full
+ * passing of pt_regs to function tracing, then kprobes can
+ * optimize on top of function tracing.
+ */
+#if defined(CONFIG_FUNCTION_TRACER) && defined(ARCH_SUPPORTS_FTRACE_SAVE_REGS) \
+ && defined(ARCH_SUPPORTS_KPROBES_ON_FTRACE)
+# define KPROBES_CAN_USE_FTRACE
+#endif
+
 /* Attach to insert probes on any functions which should be ignored */
#define __kprobes __attribute__((__section__(".kprobes.text")))
+
#else /* CONFIG_KPROBES */
typedef int kprobe_opcode_t;
struct arch_specific_insn {
int dummy;
};
#define __kprobes
+
#endif /* CONFIG_KPROBES */
struct kprobe;
@@ -128,6 +141,7 @@ struct kprobe {
* NOTE:
* this flag is only for optimized_kprobe.
*/
+#define KPROBE_FLAG_FTRACE 8 /* probe is using ftrace */
/* Has this kprobe gone ? */
static inline int kprobe_gone(struct kprobe *p)
@@ -146,6 +160,13 @@ static inline int kprobe_optimized(struct kprobe *p)
{
return p->flags & KPROBE_FLAG_OPTIMIZED;
}
+
+/* Does this kprobe use ftrace? */
+static inline int kprobe_ftrace(struct kprobe *p)
+{
+ return p->flags & KPROBE_FLAG_FTRACE;
+}
+
/*
* Special probe type that uses setjmp-longjmp type tricks to resume
* execution at a specified entry with a matching prototype corresponding
@@ -295,6 +316,12 @@ extern int proc_kprobes_optimization_handler(struct ctl_table *table,
#endif
#endif /* CONFIG_OPTPROBES */
+#ifdef KPROBES_CAN_USE_FTRACE
+extern void kprobe_ftrace_handler(unsigned long ip, unsigned long parent_ip,
+ struct ftrace_ops *ops, struct pt_regs *regs);
+extern int arch_prepare_kprobe_ftrace(struct kprobe *p);
+#endif
+
/* Get the kprobe at this addr (if any) - called with preemption disabled */
struct kprobe *get_kprobe(void *addr);
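
A hedged sketch of how the declarations above are consumed: an ordinary kprobe whose address lands on the ftrace nop at a function entry. On a KPROBES_CAN_USE_FTRACE configuration, register_kprobe() sets KPROBE_FLAG_FTRACE itself and arms the probe through ftrace rather than a breakpoint. The target symbol and messages are assumptions:

#include <linux/kernel.h>
#include <linux/kprobes.h>
#include <linux/ptrace.h>

static int sample_pre(struct kprobe *p, struct pt_regs *regs)
{
	pr_info("hit %s at %lx\n", p->symbol_name, instruction_pointer(regs));
	return 0;
}

static struct kprobe sample_kp = {
	.symbol_name	= "do_fork",	/* hypothetical target */
	.pre_handler	= sample_pre,
};

static int sample_kprobe_init(void)
{
	int ret = register_kprobe(&sample_kp);

	/* kprobe_ftrace() (added above) reports ftrace-based probes */
	if (!ret && kprobe_ftrace(&sample_kp))
		pr_info("probe uses the ftrace path\n");
	return ret;
}
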
diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
index bdb41612bfe..599afc4bb67 100644
--- a/include/linux/perf_event.h
+++ b/include/linux/perf_event.h
@@ -130,8 +130,10 @@ enum perf_event_sample_format {
PERF_SAMPLE_STREAM_ID = 1U << 9,
PERF_SAMPLE_RAW = 1U << 10,
PERF_SAMPLE_BRANCH_STACK = 1U << 11,
+ PERF_SAMPLE_REGS_USER = 1U << 12,
+ PERF_SAMPLE_STACK_USER = 1U << 13,
- PERF_SAMPLE_MAX = 1U << 12, /* non-ABI */
+ PERF_SAMPLE_MAX = 1U << 14, /* non-ABI */
};
/*
@@ -163,6 +165,15 @@ enum perf_branch_sample_type {
PERF_SAMPLE_BRANCH_HV)
/*
+ * Values to determine ABI of the registers dump.
+ */
+enum perf_sample_regs_abi {
+ PERF_SAMPLE_REGS_ABI_NONE = 0,
+ PERF_SAMPLE_REGS_ABI_32 = 1,
+ PERF_SAMPLE_REGS_ABI_64 = 2,
+};
+
+/*
* The format of the data returned by read() on a perf event fd,
* as specified by attr.read_format:
*
@@ -194,6 +205,8 @@ enum perf_event_read_format {
#define PERF_ATTR_SIZE_VER0 64 /* sizeof first published struct */
#define PERF_ATTR_SIZE_VER1 72 /* add: config2 */
#define PERF_ATTR_SIZE_VER2 80 /* add: branch_sample_type */
+#define PERF_ATTR_SIZE_VER3 96 /* add: sample_regs_user */
+ /* add: sample_stack_user */
/*
* Hardware event_id to monitor via a performance monitoring event:
@@ -255,7 +268,10 @@ struct perf_event_attr {
exclude_host : 1, /* don't count in host */
exclude_guest : 1, /* don't count in guest */
- __reserved_1 : 43;
+ exclude_callchain_kernel : 1, /* exclude kernel callchains */
+ exclude_callchain_user : 1, /* exclude user callchains */
+
+ __reserved_1 : 41;
union {
__u32 wakeup_events; /* wakeup every n events */
@@ -271,7 +287,21 @@ struct perf_event_attr {
__u64 bp_len;
__u64 config2; /* extension of config1 */
};
- __u64 branch_sample_type; /* enum branch_sample_type */
+ __u64 branch_sample_type; /* enum perf_branch_sample_type */
+
+ /*
+ * Defines the set of user regs to dump on samples.
+ * See asm/perf_regs.h for details.
+ */
+ __u64 sample_regs_user;
+
+ /*
+ * Defines the size of the user stack to dump on samples.
+ */
+ __u32 sample_stack_user;
+
+ /* Align to u64. */
+ __u32 __reserved_2;
};
#define perf_flags(attr) (*(&(attr)->read_format + 1))
@@ -550,6 +580,13 @@ enum perf_event_type {
* char data[size];}&& PERF_SAMPLE_RAW
*
* { u64 from, to, flags } lbr[nr];} && PERF_SAMPLE_BRANCH_STACK
+ *
+ * { u64 abi; # enum perf_sample_regs_abi
+ * u64 regs[weight(mask)]; } && PERF_SAMPLE_REGS_USER
+ *
+ * { u64 size;
+ * char data[size];
+ * u64 dyn_size; } && PERF_SAMPLE_STACK_USER
* };
*/
PERF_RECORD_SAMPLE = 9,
@@ -611,6 +648,7 @@ struct perf_guest_info_callbacks {
#include <linux/static_key.h>
#include <linux/atomic.h>
#include <linux/sysfs.h>
+#include <linux/perf_regs.h>
#include <asm/local.h>
struct perf_callchain_entry {
@@ -656,6 +694,11 @@ struct perf_branch_stack {
struct perf_branch_entry entries[0];
};
+struct perf_regs_user {
+ __u64 abi;
+ struct pt_regs *regs;
+};
+
struct task_struct;
/*
@@ -1135,6 +1178,8 @@ struct perf_sample_data {
struct perf_callchain_entry *callchain;
struct perf_raw_record *raw;
struct perf_branch_stack *br_stack;
+ struct perf_regs_user regs_user;
+ u64 stack_user_size;
};
static inline void perf_sample_data_init(struct perf_sample_data *data,
@@ -1144,7 +1189,10 @@ static inline void perf_sample_data_init(struct perf_sample_data *data,
data->addr = addr;
data->raw = NULL;
data->br_stack = NULL;
- data->period = period;
+ data->period = period;
+ data->regs_user.abi = PERF_SAMPLE_REGS_ABI_NONE;
+ data->regs_user.regs = NULL;
+ data->stack_user_size = 0;
}
extern void perf_output_sample(struct perf_output_handle *handle,
@@ -1292,8 +1340,10 @@ static inline bool has_branch_stack(struct perf_event *event)
extern int perf_output_begin(struct perf_output_handle *handle,
struct perf_event *event, unsigned int size);
extern void perf_output_end(struct perf_output_handle *handle);
-extern void perf_output_copy(struct perf_output_handle *handle,
+extern unsigned int perf_output_copy(struct perf_output_handle *handle,
const void *buf, unsigned int len);
+extern unsigned int perf_output_skip(struct perf_output_handle *handle,
+ unsigned int len);
extern int perf_swevent_get_recursion_context(void);
extern void perf_swevent_put_recursion_context(int rctx);
extern void perf_event_enable(struct perf_event *event);
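
The new attr fields are driven from user space. A hedged sketch (x86 assumed: the mask bits correspond to PERF_REG_X86_SP=7 and PERF_REG_X86_IP=8 from the new asm/perf_regs.h, and the event choice is arbitrary) requesting user regs and a user stack dump with each sample:

#include <linux/perf_event.h>
#include <string.h>
#include <sys/syscall.h>
#include <unistd.h>

static int open_dump_event(void)
{
	struct perf_event_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.size = PERF_ATTR_SIZE_VER3;	/* 96 bytes, covers the new fields */
	attr.type = PERF_TYPE_HARDWARE;
	attr.config = PERF_COUNT_HW_CPU_CYCLES;
	attr.sample_period = 100000;
	attr.sample_type = PERF_SAMPLE_IP | PERF_SAMPLE_REGS_USER |
			   PERF_SAMPLE_STACK_USER;
	attr.sample_regs_user = (1ULL << 7) | (1ULL << 8);	/* SP | IP on x86 */
	attr.sample_stack_user = 8192;	/* must be u64-aligned and < USHRT_MAX */
	attr.exclude_callchain_kernel = 1;	/* new bit, see above */

	/* self, any CPU, no group, no flags */
	return syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
}
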
diff --git a/include/linux/perf_regs.h b/include/linux/perf_regs.h
new file mode 100644
index 00000000000..3c73d5fe18b
--- /dev/null
+++ b/include/linux/perf_regs.h
@@ -0,0 +1,25 @@
+#ifndef _LINUX_PERF_REGS_H
+#define _LINUX_PERF_REGS_H
+
+#ifdef CONFIG_HAVE_PERF_REGS
+#include <asm/perf_regs.h>
+u64 perf_reg_value(struct pt_regs *regs, int idx);
+int perf_reg_validate(u64 mask);
+u64 perf_reg_abi(struct task_struct *task);
+#else
+static inline u64 perf_reg_value(struct pt_regs *regs, int idx)
+{
+ return 0;
+}
+
+static inline int perf_reg_validate(u64 mask)
+{
+ return mask ? -ENOSYS : 0;
+}
+
+static inline u64 perf_reg_abi(struct task_struct *task)
+{
+ return PERF_SAMPLE_REGS_ABI_NONE;
+}
+#endif /* CONFIG_HAVE_PERF_REGS */
+#endif /* _LINUX_PERF_REGS_H */
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 335720a1fc3..83035269e59 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -446,6 +446,9 @@ extern int get_dumpable(struct mm_struct *mm);
#define MMF_VM_HUGEPAGE 17 /* set when VM_HUGEPAGE is set on vma */
#define MMF_EXE_FILE_CHANGED 18 /* see prctl_set_mm_exe_file() */
+#define MMF_HAS_UPROBES 19 /* has uprobes */
+#define MMF_RECALC_UPROBES 20 /* MMF_HAS_UPROBES can be wrong */
+
#define MMF_INIT_MASK (MMF_DUMPABLE_MASK | MMF_DUMP_FILTER_MASK)
struct sighand_struct {
diff --git a/include/linux/uprobes.h b/include/linux/uprobes.h
index efe4b3308c7..e6f0331e3d4 100644
--- a/include/linux/uprobes.h
+++ b/include/linux/uprobes.h
@@ -99,25 +99,27 @@ struct xol_area {
struct uprobes_state {
struct xol_area *xol_area;
- atomic_t count;
};
+
extern int __weak set_swbp(struct arch_uprobe *aup, struct mm_struct *mm, unsigned long vaddr);
-extern int __weak set_orig_insn(struct arch_uprobe *aup, struct mm_struct *mm, unsigned long vaddr, bool verify);
+extern int __weak set_orig_insn(struct arch_uprobe *aup, struct mm_struct *mm, unsigned long vaddr);
extern bool __weak is_swbp_insn(uprobe_opcode_t *insn);
extern int uprobe_register(struct inode *inode, loff_t offset, struct uprobe_consumer *uc);
extern void uprobe_unregister(struct inode *inode, loff_t offset, struct uprobe_consumer *uc);
extern int uprobe_mmap(struct vm_area_struct *vma);
extern void uprobe_munmap(struct vm_area_struct *vma, unsigned long start, unsigned long end);
+extern void uprobe_dup_mmap(struct mm_struct *oldmm, struct mm_struct *newmm);
extern void uprobe_free_utask(struct task_struct *t);
extern void uprobe_copy_process(struct task_struct *t);
extern unsigned long __weak uprobe_get_swbp_addr(struct pt_regs *regs);
+extern void __weak arch_uprobe_enable_step(struct arch_uprobe *arch);
+extern void __weak arch_uprobe_disable_step(struct arch_uprobe *arch);
extern int uprobe_post_sstep_notifier(struct pt_regs *regs);
extern int uprobe_pre_sstep_notifier(struct pt_regs *regs);
extern void uprobe_notify_resume(struct pt_regs *regs);
extern bool uprobe_deny_signal(void);
extern bool __weak arch_uprobe_skip_sstep(struct arch_uprobe *aup, struct pt_regs *regs);
extern void uprobe_clear_state(struct mm_struct *mm);
-extern void uprobe_reset_state(struct mm_struct *mm);
#else /* !CONFIG_UPROBES */
struct uprobes_state {
};
@@ -138,6 +140,10 @@ static inline void
uprobe_munmap(struct vm_area_struct *vma, unsigned long start, unsigned long end)
{
}
+static inline void
+uprobe_dup_mmap(struct mm_struct *oldmm, struct mm_struct *newmm)
+{
+}
static inline void uprobe_notify_resume(struct pt_regs *regs)
{
}
@@ -158,8 +164,5 @@ static inline void uprobe_copy_process(struct task_struct *t)
static inline void uprobe_clear_state(struct mm_struct *mm)
{
}
-static inline void uprobe_reset_state(struct mm_struct *mm)
-{
-}
#endif /* !CONFIG_UPROBES */
#endif /* _LINUX_UPROBES_H */
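
For reference, a hedged sketch of an in-kernel caller of uprobe_register() as prototyped above. The handler signature follows struct uprobe_consumer as of this series; obtaining the inode (e.g. via kern_path()) is left out:

#include <linux/uprobes.h>

static int sample_uprobe_handler(struct uprobe_consumer *self,
				 struct pt_regs *regs)
{
	/* Runs in task context when a mapping of the file hits the probe */
	return 0;
}

static struct uprobe_consumer sample_consumer = {
	.handler = sample_uprobe_handler,
};

/* Probe the instruction at byte @offset of the file behind @inode */
static int sample_probe_file(struct inode *inode, loff_t offset)
{
	return uprobe_register(inode, offset, &sample_consumer);
}
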
diff --git a/kernel/Makefile b/kernel/Makefile
index e5602d32acb..5404911eaee 100644
--- a/kernel/Makefile
+++ b/kernel/Makefile
@@ -97,7 +97,7 @@ obj-$(CONFIG_COMPAT_BINFMT_ELF) += elfcore.o
obj-$(CONFIG_BINFMT_ELF_FDPIC) += elfcore.o
obj-$(CONFIG_FUNCTION_TRACER) += trace/
obj-$(CONFIG_TRACING) += trace/
-obj-$(CONFIG_X86_DS) += trace/
+obj-$(CONFIG_TRACE_CLOCK) += trace/
obj-$(CONFIG_RING_BUFFER) += trace/
obj-$(CONFIG_TRACEPOINTS) += trace/
obj-$(CONFIG_IRQ_WORK) += irq_work.o
diff --git a/kernel/events/callchain.c b/kernel/events/callchain.c
index 98d4597f43d..c77206184b8 100644
--- a/kernel/events/callchain.c
+++ b/kernel/events/callchain.c
@@ -159,6 +159,11 @@ perf_callchain(struct perf_event *event, struct pt_regs *regs)
int rctx;
struct perf_callchain_entry *entry;
+ int kernel = !event->attr.exclude_callchain_kernel;
+ int user = !event->attr.exclude_callchain_user;
+
+ if (!kernel && !user)
+ return NULL;
entry = get_callchain_entry(&rctx);
if (rctx == -1)
@@ -169,24 +174,29 @@ perf_callchain(struct perf_event *event, struct pt_regs *regs)
entry->nr = 0;
- if (!user_mode(regs)) {
+ if (kernel && !user_mode(regs)) {
perf_callchain_store(entry, PERF_CONTEXT_KERNEL);
perf_callchain_kernel(entry, regs);
- if (current->mm)
- regs = task_pt_regs(current);
- else
- regs = NULL;
}
- if (regs) {
- /*
- * Disallow cross-task user callchains.
- */
- if (event->ctx->task && event->ctx->task != current)
- goto exit_put;
-
- perf_callchain_store(entry, PERF_CONTEXT_USER);
- perf_callchain_user(entry, regs);
+ if (user) {
+ if (!user_mode(regs)) {
+ if (current->mm)
+ regs = task_pt_regs(current);
+ else
+ regs = NULL;
+ }
+
+ if (regs) {
+ /*
+ * Disallow cross-task user callchains.
+ */
+ if (event->ctx->task && event->ctx->task != current)
+ goto exit_put;
+
+ perf_callchain_store(entry, PERF_CONTEXT_USER);
+ perf_callchain_user(entry, regs);
+ }
}
exit_put:
diff --git a/kernel/events/core.c b/kernel/events/core.c
index 7fee567153f..7b9df353ba1 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -36,6 +36,7 @@
#include <linux/perf_event.h>
#include <linux/ftrace_event.h>
#include <linux/hw_breakpoint.h>
+#include <linux/mm_types.h>
#include "internal.h"
@@ -3764,6 +3765,132 @@ int perf_unregister_guest_info_callbacks(struct perf_guest_info_callbacks *cbs)
}
EXPORT_SYMBOL_GPL(perf_unregister_guest_info_callbacks);
+static void
+perf_output_sample_regs(struct perf_output_handle *handle,
+ struct pt_regs *regs, u64 mask)
+{
+ int bit;
+
+ for_each_set_bit(bit, (const unsigned long *) &mask,
+ sizeof(mask) * BITS_PER_BYTE) {
+ u64 val;
+
+ val = perf_reg_value(regs, bit);
+ perf_output_put(handle, val);
+ }
+}
+
+static void perf_sample_regs_user(struct perf_regs_user *regs_user,
+ struct pt_regs *regs)
+{
+ if (!user_mode(regs)) {
+ if (current->mm)
+ regs = task_pt_regs(current);
+ else
+ regs = NULL;
+ }
+
+ if (regs) {
+ regs_user->regs = regs;
+ regs_user->abi = perf_reg_abi(current);
+ }
+}
+
+/*
+ * Get remaining task size from user stack pointer.
+ *
+ * It'd be better to take the stack vma map and limit this more
+ * precisely, but there's no way to get it safely under interrupt,
+ * so we use TASK_SIZE as the limit.
+ */
+static u64 perf_ustack_task_size(struct pt_regs *regs)
+{
+ unsigned long addr = perf_user_stack_pointer(regs);
+
+ if (!addr || addr >= TASK_SIZE)
+ return 0;
+
+ return TASK_SIZE - addr;
+}
+
+static u16
+perf_sample_ustack_size(u16 stack_size, u16 header_size,
+ struct pt_regs *regs)
+{
+ u64 task_size;
+
+ /* No regs, no stack pointer, no dump. */
+ if (!regs)
+ return 0;
+
+ /*
+ * Check that the requested stack size fits within:
+ * - TASK_SIZE
+ * If it doesn't, limit the size to TASK_SIZE.
+ *
+ * - the remaining sample size
+ * If it doesn't, shrink the stack size to fit
+ * the remaining sample size.
+ */
+
+ task_size = min((u64) USHRT_MAX, perf_ustack_task_size(regs));
+ stack_size = min(stack_size, (u16) task_size);
+
+ /* Current header size plus static size and dynamic size. */
+ header_size += 2 * sizeof(u64);
+
+ /* Do we fit in with the current stack dump size? */
+ if ((u16) (header_size + stack_size) < header_size) {
+ /*
+ * If we overflow the maximum size for the sample,
+ * we customize the stack dump size to fit in.
+ */
+ stack_size = USHRT_MAX - header_size - sizeof(u64);
+ stack_size = round_up(stack_size, sizeof(u64));
+ }
+
+ return stack_size;
+}
+
+static void
+perf_output_sample_ustack(struct perf_output_handle *handle, u64 dump_size,
+ struct pt_regs *regs)
+{
+ /* Case of a kernel thread, nothing to dump */
+ if (!regs) {
+ u64 size = 0;
+ perf_output_put(handle, size);
+ } else {
+ unsigned long sp;
+ unsigned int rem;
+ u64 dyn_size;
+
+ /*
+ * We dump:
+ * static size
+ * - the size requested by the user, or the best one we can fit
+ * into the sample max size
+ * data
+ * - user stack dump data
+ * dynamic size
+ * - the actual dumped size
+ */
+
+ /* Static size. */
+ perf_output_put(handle, dump_size);
+
+ /* Data. */
+ sp = perf_user_stack_pointer(regs);
+ rem = __output_copy_user(handle, (void *) sp, dump_size);
+ dyn_size = dump_size - rem;
+
+ perf_output_skip(handle, rem);
+
+ /* Dynamic size. */
+ perf_output_put(handle, dyn_size);
+ }
+}
+
static void __perf_event_header__init_id(struct perf_event_header *header,
struct perf_sample_data *data,
struct perf_event *event)
@@ -4024,6 +4151,28 @@ void perf_output_sample(struct perf_output_handle *handle,
perf_output_put(handle, nr);
}
}
+
+ if (sample_type & PERF_SAMPLE_REGS_USER) {
+ u64 abi = data->regs_user.abi;
+
+ /*
+ * If there are no regs to dump, signal it through the
+ * first u64 being zero (PERF_SAMPLE_REGS_ABI_NONE).
+ */
+ perf_output_put(handle, abi);
+
+ if (abi) {
+ u64 mask = event->attr.sample_regs_user;
+ perf_output_sample_regs(handle,
+ data->regs_user.regs,
+ mask);
+ }
+ }
+
+ if (sample_type & PERF_SAMPLE_STACK_USER)
+ perf_output_sample_ustack(handle,
+ data->stack_user_size,
+ data->regs_user.regs);
}
void perf_prepare_sample(struct perf_event_header *header,
@@ -4075,6 +4224,49 @@ void perf_prepare_sample(struct perf_event_header *header,
}
header->size += size;
}
+
+ if (sample_type & PERF_SAMPLE_REGS_USER) {
+ /* regs dump ABI info */
+ int size = sizeof(u64);
+
+ perf_sample_regs_user(&data->regs_user, regs);
+
+ if (data->regs_user.regs) {
+ u64 mask = event->attr.sample_regs_user;
+ size += hweight64(mask) * sizeof(u64);
+ }
+
+ header->size += size;
+ }
+
+ if (sample_type & PERF_SAMPLE_STACK_USER) {
+ /*
+ * The PERF_SAMPLE_STACK_USER bit either needs to always be
+ * processed last, or an additional check must be added when a
+ * new sample type is introduced, because we could eat up the
+ * rest of the sample size.
+ */
+ struct perf_regs_user *uregs = &data->regs_user;
+ u16 stack_size = event->attr.sample_stack_user;
+ u16 size = sizeof(u64);
+
+ if (!uregs->abi)
+ perf_sample_regs_user(uregs, regs);
+
+ stack_size = perf_sample_ustack_size(stack_size, header->size,
+ uregs->regs);
+
+ /*
+ * If there is something to dump, add space for the dump
+ * itself and for the field that tells the dynamic size,
+ * which is how many bytes were actually dumped.
+ */
+ if (stack_size)
+ size += sizeof(u64) + stack_size;
+
+ data->stack_user_size = stack_size;
+ header->size += size;
+ }
}
static void perf_event_output(struct perf_event *event,
@@ -6151,6 +6343,28 @@ static int perf_copy_attr(struct perf_event_attr __user *uattr,
attr->branch_sample_type = mask;
}
}
+
+ if (attr->sample_type & PERF_SAMPLE_REGS_USER) {
+ ret = perf_reg_validate(attr->sample_regs_user);
+ if (ret)
+ return ret;
+ }
+
+ if (attr->sample_type & PERF_SAMPLE_STACK_USER) {
+ if (!arch_perf_have_user_stack_dump())
+ return -ENOSYS;
+
+ /*
+ * We have a __u32 type for the size, but so far
+ * we can only use __u16 as the maximum due to the
+ * __u16 sample size limit.
+ */
+ if (attr->sample_stack_user >= USHRT_MAX)
+ ret = -EINVAL;
+ else if (!IS_ALIGNED(attr->sample_stack_user, sizeof(u64)))
+ ret = -EINVAL;
+ }
+
out:
return ret;
diff --git a/kernel/events/internal.h b/kernel/events/internal.h
index a096c19f2c2..d56a64c99a8 100644
--- a/kernel/events/internal.h
+++ b/kernel/events/internal.h
@@ -2,6 +2,7 @@
#define _KERNEL_EVENTS_INTERNAL_H
#include <linux/hardirq.h>
+#include <linux/uaccess.h>
/* Buffer handling */
@@ -76,30 +77,53 @@ static inline unsigned long perf_data_size(struct ring_buffer *rb)
return rb->nr_pages << (PAGE_SHIFT + page_order(rb));
}
-static inline void
-__output_copy(struct perf_output_handle *handle,
- const void *buf, unsigned int len)
+#define DEFINE_OUTPUT_COPY(func_name, memcpy_func) \
+static inline unsigned int \
+func_name(struct perf_output_handle *handle, \
+ const void *buf, unsigned int len) \
+{ \
+ unsigned long size, written; \
+ \
+ do { \
+ size = min_t(unsigned long, handle->size, len); \
+ \
+ written = memcpy_func(handle->addr, buf, size); \
+ \
+ len -= written; \
+ handle->addr += written; \
+ buf += written; \
+ handle->size -= written; \
+ if (!handle->size) { \
+ struct ring_buffer *rb = handle->rb; \
+ \
+ handle->page++; \
+ handle->page &= rb->nr_pages - 1; \
+ handle->addr = rb->data_pages[handle->page]; \
+ handle->size = PAGE_SIZE << page_order(rb); \
+ } \
+ } while (len && written == size); \
+ \
+ return len; \
+}
+
+static inline int memcpy_common(void *dst, const void *src, size_t n)
{
- do {
- unsigned long size = min_t(unsigned long, handle->size, len);
-
- memcpy(handle->addr, buf, size);
-
- len -= size;
- handle->addr += size;
- buf += size;
- handle->size -= size;
- if (!handle->size) {
- struct ring_buffer *rb = handle->rb;
-
- handle->page++;
- handle->page &= rb->nr_pages - 1;
- handle->addr = rb->data_pages[handle->page];
- handle->size = PAGE_SIZE << page_order(rb);
- }
- } while (len);
+ memcpy(dst, src, n);
+ return n;
}
+DEFINE_OUTPUT_COPY(__output_copy, memcpy_common)
+
+#define MEMCPY_SKIP(dst, src, n) (n)
+
+DEFINE_OUTPUT_COPY(__output_skip, MEMCPY_SKIP)
+
+#ifndef arch_perf_out_copy_user
+#define arch_perf_out_copy_user __copy_from_user_inatomic
+#endif
+
+DEFINE_OUTPUT_COPY(__output_copy_user, arch_perf_out_copy_user)
+
/* Callchain handling */
extern struct perf_callchain_entry *
perf_callchain(struct perf_event *event, struct pt_regs *regs);
@@ -134,4 +158,20 @@ static inline void put_recursion_context(int *recursion, int rctx)
recursion[rctx]--;
}
+#ifdef CONFIG_HAVE_PERF_USER_STACK_DUMP
+static inline bool arch_perf_have_user_stack_dump(void)
+{
+ return true;
+}
+
+#define perf_user_stack_pointer(regs) user_stack_pointer(regs)
+#else
+static inline bool arch_perf_have_user_stack_dump(void)
+{
+ return false;
+}
+
+#define perf_user_stack_pointer(regs) 0
+#endif /* CONFIG_HAVE_PERF_USER_STACK_DUMP */
+
#endif /* _KERNEL_EVENTS_INTERNAL_H */
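
The DEFINE_OUTPUT_COPY() functions all return the number of bytes that could not be written, which is how perf_output_sample_ustack() above derives the dynamic size. A hedged restatement of that calling convention (helper name hypothetical):

static u64 capture_user_stack(struct perf_output_handle *handle,
			      unsigned long sp, unsigned int want)
{
	/* rem is what __output_copy_user() failed to pull in */
	unsigned int rem = __output_copy_user(handle, (void *) sp, want);

	/* Pad so the record keeps the size we advertised in the header */
	perf_output_skip(handle, rem);

	return want - rem;	/* bytes actually captured */
}
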
diff --git a/kernel/events/ring_buffer.c b/kernel/events/ring_buffer.c
index 6ddaba43fb7..23cb34ff397 100644
--- a/kernel/events/ring_buffer.c
+++ b/kernel/events/ring_buffer.c
@@ -182,10 +182,16 @@ out:
return -ENOSPC;
}
-void perf_output_copy(struct perf_output_handle *handle,
+unsigned int perf_output_copy(struct perf_output_handle *handle,
const void *buf, unsigned int len)
{
- __output_copy(handle, buf, len);
+ return __output_copy(handle, buf, len);
+}
+
+unsigned int perf_output_skip(struct perf_output_handle *handle,
+ unsigned int len)
+{
+ return __output_skip(handle, NULL, len);
}
void perf_output_end(struct perf_output_handle *handle)
diff --git a/kernel/events/uprobes.c b/kernel/events/uprobes.c
index c08a22d02f7..912ef48d28a 100644
--- a/kernel/events/uprobes.c
+++ b/kernel/events/uprobes.c
@@ -280,12 +280,10 @@ static int read_opcode(struct mm_struct *mm, unsigned long vaddr, uprobe_opcode_
if (ret <= 0)
return ret;
- lock_page(page);
vaddr_new = kmap_atomic(page);
vaddr &= ~PAGE_MASK;
memcpy(opcode, vaddr_new + vaddr, UPROBE_SWBP_INSN_SIZE);
kunmap_atomic(vaddr_new);
- unlock_page(page);
put_page(page);
@@ -334,7 +332,7 @@ int __weak set_swbp(struct arch_uprobe *auprobe, struct mm_struct *mm, unsigned
*/
result = is_swbp_at_addr(mm, vaddr);
if (result == 1)
- return -EEXIST;
+ return 0;
if (result)
return result;
@@ -347,24 +345,22 @@ int __weak set_swbp(struct arch_uprobe *auprobe, struct mm_struct *mm, unsigned
* @mm: the probed process address space.
* @auprobe: arch specific probepoint information.
* @vaddr: the virtual address to insert the opcode.
- * @verify: if true, verify existance of breakpoint instruction.
*
* For mm @mm, restore the original opcode (opcode) at @vaddr.
* Return 0 (success) or a negative errno.
*/
int __weak
-set_orig_insn(struct arch_uprobe *auprobe, struct mm_struct *mm, unsigned long vaddr, bool verify)
+set_orig_insn(struct arch_uprobe *auprobe, struct mm_struct *mm, unsigned long vaddr)
{
- if (verify) {
- int result;
+ int result;
+
+ result = is_swbp_at_addr(mm, vaddr);
+ if (!result)
+ return -EINVAL;
- result = is_swbp_at_addr(mm, vaddr);
- if (!result)
- return -EINVAL;
+ if (result != 1)
+ return result;
- if (result != 1)
- return result;
- }
return write_opcode(auprobe, mm, vaddr, *(uprobe_opcode_t *)auprobe->insn);
}
@@ -415,11 +411,10 @@ static struct uprobe *__find_uprobe(struct inode *inode, loff_t offset)
static struct uprobe *find_uprobe(struct inode *inode, loff_t offset)
{
struct uprobe *uprobe;
- unsigned long flags;
- spin_lock_irqsave(&uprobes_treelock, flags);
+ spin_lock(&uprobes_treelock);
uprobe = __find_uprobe(inode, offset);
- spin_unlock_irqrestore(&uprobes_treelock, flags);
+ spin_unlock(&uprobes_treelock);
return uprobe;
}
@@ -466,12 +461,11 @@ static struct uprobe *__insert_uprobe(struct uprobe *uprobe)
*/
static struct uprobe *insert_uprobe(struct uprobe *uprobe)
{
- unsigned long flags;
struct uprobe *u;
- spin_lock_irqsave(&uprobes_treelock, flags);
+ spin_lock(&uprobes_treelock);
u = __insert_uprobe(uprobe);
- spin_unlock_irqrestore(&uprobes_treelock, flags);
+ spin_unlock(&uprobes_treelock);
/* For now assume that the instruction need not be single-stepped */
uprobe->flags |= UPROBE_SKIP_SSTEP;
@@ -649,6 +643,7 @@ static int
install_breakpoint(struct uprobe *uprobe, struct mm_struct *mm,
struct vm_area_struct *vma, unsigned long vaddr)
{
+ bool first_uprobe;
int ret;
/*
@@ -659,7 +654,7 @@ install_breakpoint(struct uprobe *uprobe, struct mm_struct *mm,
* Hence behave as if probe already existed.
*/
if (!uprobe->consumers)
- return -EEXIST;
+ return 0;
if (!(uprobe->flags & UPROBE_COPY_INSN)) {
ret = copy_insn(uprobe, vma->vm_file);
@@ -681,17 +676,18 @@ install_breakpoint(struct uprobe *uprobe, struct mm_struct *mm,
}
/*
- * Ideally, should be updating the probe count after the breakpoint
- * has been successfully inserted. However a thread could hit the
- * breakpoint we just inserted even before the probe count is
- * incremented. If this is the first breakpoint placed, breakpoint
- * notifier might ignore uprobes and pass the trap to the thread.
- * Hence increment before and decrement on failure.
+ * Set MMF_HAS_UPROBES in advance for uprobe_pre_sstep_notifier();
+ * the task can hit this breakpoint right after __replace_page().
*/
- atomic_inc(&mm->uprobes_state.count);
+ first_uprobe = !test_bit(MMF_HAS_UPROBES, &mm->flags);
+ if (first_uprobe)
+ set_bit(MMF_HAS_UPROBES, &mm->flags);
+
ret = set_swbp(&uprobe->arch, mm, vaddr);
- if (ret)
- atomic_dec(&mm->uprobes_state.count);
+ if (!ret)
+ clear_bit(MMF_RECALC_UPROBES, &mm->flags);
+ else if (first_uprobe)
+ clear_bit(MMF_HAS_UPROBES, &mm->flags);
return ret;
}
@@ -699,8 +695,12 @@ install_breakpoint(struct uprobe *uprobe, struct mm_struct *mm,
static void
remove_breakpoint(struct uprobe *uprobe, struct mm_struct *mm, unsigned long vaddr)
{
- if (!set_orig_insn(&uprobe->arch, mm, vaddr, true))
- atomic_dec(&mm->uprobes_state.count);
+ /* can happen if uprobe_register() fails */
+ if (!test_bit(MMF_HAS_UPROBES, &mm->flags))
+ return;
+
+ set_bit(MMF_RECALC_UPROBES, &mm->flags);
+ set_orig_insn(&uprobe->arch, mm, vaddr);
}
/*
@@ -710,11 +710,9 @@ remove_breakpoint(struct uprobe *uprobe, struct mm_struct *mm, unsigned long vad
*/
static void delete_uprobe(struct uprobe *uprobe)
{
- unsigned long flags;
-
- spin_lock_irqsave(&uprobes_treelock, flags);
+ spin_lock(&uprobes_treelock);
rb_erase(&uprobe->rb_node, &uprobes_tree);
- spin_unlock_irqrestore(&uprobes_treelock, flags);
+ spin_unlock(&uprobes_treelock);
iput(uprobe->inode);
put_uprobe(uprobe);
atomic_dec(&uprobe_events);
@@ -831,17 +829,11 @@ static int register_for_each_vma(struct uprobe *uprobe, bool is_register)
vaddr_to_offset(vma, info->vaddr) != uprobe->offset)
goto unlock;
- if (is_register) {
+ if (is_register)
err = install_breakpoint(uprobe, mm, vma, info->vaddr);
- /*
- * We can race against uprobe_mmap(), see the
- * comment near uprobe_hash().
- */
- if (err == -EEXIST)
- err = 0;
- } else {
+ else
remove_breakpoint(uprobe, mm, info->vaddr);
- }
+
unlock:
up_write(&mm->mmap_sem);
free:
@@ -908,7 +900,8 @@ int uprobe_register(struct inode *inode, loff_t offset, struct uprobe_consumer *
}
mutex_unlock(uprobes_hash(inode));
- put_uprobe(uprobe);
+ if (uprobe)
+ put_uprobe(uprobe);
return ret;
}
@@ -978,7 +971,6 @@ static void build_probe_list(struct inode *inode,
struct list_head *head)
{
loff_t min, max;
- unsigned long flags;
struct rb_node *n, *t;
struct uprobe *u;
@@ -986,7 +978,7 @@ static void build_probe_list(struct inode *inode,
min = vaddr_to_offset(vma, start);
max = min + (end - start) - 1;
- spin_lock_irqsave(&uprobes_treelock, flags);
+ spin_lock(&uprobes_treelock);
n = find_node_in_range(inode, min, max);
if (n) {
for (t = n; t; t = rb_prev(t)) {
@@ -1004,27 +996,20 @@ static void build_probe_list(struct inode *inode,
atomic_inc(&u->ref);
}
}
- spin_unlock_irqrestore(&uprobes_treelock, flags);
+ spin_unlock(&uprobes_treelock);
}
/*
- * Called from mmap_region.
- * called with mm->mmap_sem acquired.
+ * Called from mmap_region/vma_adjust with mm->mmap_sem acquired.
*
- * Return -ve no if we fail to insert probes and we cannot
- * bail-out.
- * Return 0 otherwise. i.e:
- *
- * - successful insertion of probes
- * - (or) no possible probes to be inserted.
- * - (or) insertion of probes failed but we can bail-out.
+ * Currently we ignore all errors and always return 0; the callers
+ * can't handle the failure anyway.
*/
int uprobe_mmap(struct vm_area_struct *vma)
{
struct list_head tmp_list;
struct uprobe *uprobe, *u;
struct inode *inode;
- int ret, count;
if (!atomic_read(&uprobe_events) || !valid_vma(vma, true))
return 0;
@@ -1036,44 +1021,35 @@ int uprobe_mmap(struct vm_area_struct *vma)
mutex_lock(uprobes_mmap_hash(inode));
build_probe_list(inode, vma, vma->vm_start, vma->vm_end, &tmp_list);
- ret = 0;
- count = 0;
-
list_for_each_entry_safe(uprobe, u, &tmp_list, pending_list) {
- if (!ret) {
+ if (!fatal_signal_pending(current)) {
unsigned long vaddr = offset_to_vaddr(vma, uprobe->offset);
-
- ret = install_breakpoint(uprobe, vma->vm_mm, vma, vaddr);
- /*
- * We can race against uprobe_register(), see the
- * comment near uprobe_hash().
- */
- if (ret == -EEXIST) {
- ret = 0;
-
- if (!is_swbp_at_addr(vma->vm_mm, vaddr))
- continue;
-
- /*
- * Unable to insert a breakpoint, but
- * breakpoint lies underneath. Increment the
- * probe count.
- */
- atomic_inc(&vma->vm_mm->uprobes_state.count);
- }
-
- if (!ret)
- count++;
+ install_breakpoint(uprobe, vma->vm_mm, vma, vaddr);
}
put_uprobe(uprobe);
}
-
mutex_unlock(uprobes_mmap_hash(inode));
- if (ret)
- atomic_sub(count, &vma->vm_mm->uprobes_state.count);
+ return 0;
+}
- return ret;
+static bool
+vma_has_uprobes(struct vm_area_struct *vma, unsigned long start, unsigned long end)
+{
+ loff_t min, max;
+ struct inode *inode;
+ struct rb_node *n;
+
+ inode = vma->vm_file->f_mapping->host;
+
+ min = vaddr_to_offset(vma, start);
+ max = min + (end - start) - 1;
+
+ spin_lock(&uprobes_treelock);
+ n = find_node_in_range(inode, min, max);
+ spin_unlock(&uprobes_treelock);
+
+ return !!n;
}
/*
@@ -1081,37 +1057,18 @@ int uprobe_mmap(struct vm_area_struct *vma)
*/
void uprobe_munmap(struct vm_area_struct *vma, unsigned long start, unsigned long end)
{
- struct list_head tmp_list;
- struct uprobe *uprobe, *u;
- struct inode *inode;
-
if (!atomic_read(&uprobe_events) || !valid_vma(vma, false))
return;
if (!atomic_read(&vma->vm_mm->mm_users)) /* called by mmput() ? */
return;
- if (!atomic_read(&vma->vm_mm->uprobes_state.count))
- return;
-
- inode = vma->vm_file->f_mapping->host;
- if (!inode)
+ if (!test_bit(MMF_HAS_UPROBES, &vma->vm_mm->flags) ||
+ test_bit(MMF_RECALC_UPROBES, &vma->vm_mm->flags))
return;
- mutex_lock(uprobes_mmap_hash(inode));
- build_probe_list(inode, vma, start, end, &tmp_list);
-
- list_for_each_entry_safe(uprobe, u, &tmp_list, pending_list) {
- unsigned long vaddr = offset_to_vaddr(vma, uprobe->offset);
- /*
- * An unregister could have removed the probe before
- * unmap. So check before we decrement the count.
- */
- if (is_swbp_at_addr(vma->vm_mm, vaddr) == 1)
- atomic_dec(&vma->vm_mm->uprobes_state.count);
- put_uprobe(uprobe);
- }
- mutex_unlock(uprobes_mmap_hash(inode));
+ if (vma_has_uprobes(vma, start, end))
+ set_bit(MMF_RECALC_UPROBES, &vma->vm_mm->flags);
}
/* Slot allocation for XOL */
@@ -1213,13 +1170,15 @@ void uprobe_clear_state(struct mm_struct *mm)
kfree(area);
}
-/*
- * uprobe_reset_state - Free the area allocated for slots.
- */
-void uprobe_reset_state(struct mm_struct *mm)
+void uprobe_dup_mmap(struct mm_struct *oldmm, struct mm_struct *newmm)
{
- mm->uprobes_state.xol_area = NULL;
- atomic_set(&mm->uprobes_state.count, 0);
+ newmm->uprobes_state.xol_area = NULL;
+
+ if (test_bit(MMF_HAS_UPROBES, &oldmm->flags)) {
+ set_bit(MMF_HAS_UPROBES, &newmm->flags);
+ /* unconditionally, dup_mmap() skips VM_DONTCOPY vmas */
+ set_bit(MMF_RECALC_UPROBES, &newmm->flags);
+ }
}
/*
@@ -1437,6 +1396,25 @@ static bool can_skip_sstep(struct uprobe *uprobe, struct pt_regs *regs)
return false;
}
+static void mmf_recalc_uprobes(struct mm_struct *mm)
+{
+ struct vm_area_struct *vma;
+
+ for (vma = mm->mmap; vma; vma = vma->vm_next) {
+ if (!valid_vma(vma, false))
+ continue;
+ /*
+ * This is not strictly accurate; we can race with
+ * uprobe_unregister() and see the already removed
+ * uprobe if delete_uprobe() was not yet called.
+ */
+ if (vma_has_uprobes(vma, vma->vm_start, vma->vm_end))
+ return;
+ }
+
+ clear_bit(MMF_HAS_UPROBES, &mm->flags);
+}
+
static struct uprobe *find_active_uprobe(unsigned long bp_vaddr, int *is_swbp)
{
struct mm_struct *mm = current->mm;
@@ -1458,11 +1436,24 @@ static struct uprobe *find_active_uprobe(unsigned long bp_vaddr, int *is_swbp)
} else {
*is_swbp = -EFAULT;
}
+
+ if (!uprobe && test_and_clear_bit(MMF_RECALC_UPROBES, &mm->flags))
+ mmf_recalc_uprobes(mm);
up_read(&mm->mmap_sem);
return uprobe;
}
+void __weak arch_uprobe_enable_step(struct arch_uprobe *arch)
+{
+ user_enable_single_step(current);
+}
+
+void __weak arch_uprobe_disable_step(struct arch_uprobe *arch)
+{
+ user_disable_single_step(current);
+}
+
/*
* Run handler and ask thread to singlestep.
* Ensure all non-fatal signals cannot interrupt thread while it singlesteps.
@@ -1509,7 +1500,7 @@ static void handle_swbp(struct pt_regs *regs)
utask->state = UTASK_SSTEP;
if (!pre_ssout(uprobe, regs, bp_vaddr)) {
- user_enable_single_step(current);
+ arch_uprobe_enable_step(&uprobe->arch);
return;
}
@@ -1518,17 +1509,15 @@ cleanup_ret:
utask->active_uprobe = NULL;
utask->state = UTASK_RUNNING;
}
- if (uprobe) {
- if (!(uprobe->flags & UPROBE_SKIP_SSTEP))
+ if (!(uprobe->flags & UPROBE_SKIP_SSTEP))
- /*
- * cannot singlestep; cannot skip instruction;
- * re-execute the instruction.
- */
- instruction_pointer_set(regs, bp_vaddr);
+ /*
+ * cannot singlestep; cannot skip instruction;
+ * re-execute the instruction.
+ */
+ instruction_pointer_set(regs, bp_vaddr);
- put_uprobe(uprobe);
- }
+ put_uprobe(uprobe);
}
/*
@@ -1547,10 +1536,10 @@ static void handle_singlestep(struct uprobe_task *utask, struct pt_regs *regs)
else
WARN_ON_ONCE(1);
+ arch_uprobe_disable_step(&uprobe->arch);
put_uprobe(uprobe);
utask->active_uprobe = NULL;
utask->state = UTASK_RUNNING;
- user_disable_single_step(current);
xol_free_insn_slot(current);
spin_lock_irq(&current->sighand->siglock);
@@ -1589,8 +1578,7 @@ int uprobe_pre_sstep_notifier(struct pt_regs *regs)
{
struct uprobe_task *utask;
- if (!current->mm || !atomic_read(&current->mm->uprobes_state.count))
- /* task is currently not uprobed */
+ if (!current->mm || !test_bit(MMF_HAS_UPROBES, &current->mm->flags))
return 0;
utask = current->utask;
diff --git a/kernel/fork.c b/kernel/fork.c
index 2c8857e1285..2343c9eaaaf 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -353,6 +353,7 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
down_write(&oldmm->mmap_sem);
flush_cache_dup_mm(oldmm);
+ uprobe_dup_mmap(oldmm, mm);
/*
* Not linked in yet - no deadlock potential:
*/
@@ -454,9 +455,6 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
if (retval)
goto out;
-
- if (file)
- uprobe_mmap(tmp);
}
/* a new mm has just been created */
arch_dup_mmap(oldmm, mm);
@@ -839,8 +837,6 @@ struct mm_struct *dup_mm(struct task_struct *tsk)
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
mm->pmd_huge_pte = NULL;
#endif
- uprobe_reset_state(mm);
-
if (!mm_init(mm, tsk))
goto fail_nomem;
diff --git a/kernel/kprobes.c b/kernel/kprobes.c
index c62b8546cc9..098f396aa40 100644
--- a/kernel/kprobes.c
+++ b/kernel/kprobes.c
@@ -561,9 +561,9 @@ static __kprobes void kprobe_optimizer(struct work_struct *work)
{
LIST_HEAD(free_list);
+ mutex_lock(&kprobe_mutex);
/* Lock modules while optimizing kprobes */
mutex_lock(&module_mutex);
- mutex_lock(&kprobe_mutex);
/*
* Step 1: Unoptimize kprobes and collect cleaned (unused and disarmed)
@@ -586,8 +586,8 @@ static __kprobes void kprobe_optimizer(struct work_struct *work)
/* Step 4: Free cleaned kprobes after quiesence period */
do_free_cleaned_kprobes(&free_list);
- mutex_unlock(&kprobe_mutex);
mutex_unlock(&module_mutex);
+ mutex_unlock(&kprobe_mutex);
/* Step 5: Kick optimizer again if needed */
if (!list_empty(&optimizing_list) || !list_empty(&unoptimizing_list))
@@ -759,20 +759,32 @@ static __kprobes void try_to_optimize_kprobe(struct kprobe *p)
struct kprobe *ap;
struct optimized_kprobe *op;
+ /* Impossible to optimize ftrace-based kprobe */
+ if (kprobe_ftrace(p))
+ return;
+
+ /* For preparing optimization, jump_label_text_reserved() is called */
+ jump_label_lock();
+ mutex_lock(&text_mutex);
+
ap = alloc_aggr_kprobe(p);
if (!ap)
- return;
+ goto out;
op = container_of(ap, struct optimized_kprobe, kp);
if (!arch_prepared_optinsn(&op->optinsn)) {
/* If failed to setup optimizing, fallback to kprobe */
arch_remove_optimized_kprobe(op);
kfree(op);
- return;
+ goto out;
}
init_aggr_kprobe(ap, p);
- optimize_kprobe(ap);
+ optimize_kprobe(ap); /* This just kicks the optimizer thread */
+
+out:
+ mutex_unlock(&text_mutex);
+ jump_label_unlock();
}
#ifdef CONFIG_SYSCTL
@@ -907,9 +919,64 @@ static __kprobes struct kprobe *alloc_aggr_kprobe(struct kprobe *p)
}
#endif /* CONFIG_OPTPROBES */
+#ifdef KPROBES_CAN_USE_FTRACE
+static struct ftrace_ops kprobe_ftrace_ops __read_mostly = {
+ .func = kprobe_ftrace_handler,
+ .flags = FTRACE_OPS_FL_SAVE_REGS,
+};
+static int kprobe_ftrace_enabled;
+
+/* Must ensure p->addr is really on ftrace */
+static int __kprobes prepare_kprobe(struct kprobe *p)
+{
+ if (!kprobe_ftrace(p))
+ return arch_prepare_kprobe(p);
+
+ return arch_prepare_kprobe_ftrace(p);
+}
+
+/* Caller must lock kprobe_mutex */
+static void __kprobes arm_kprobe_ftrace(struct kprobe *p)
+{
+ int ret;
+
+ ret = ftrace_set_filter_ip(&kprobe_ftrace_ops,
+ (unsigned long)p->addr, 0, 0);
+ WARN(ret < 0, "Failed to arm kprobe-ftrace at %p (%d)\n", p->addr, ret);
+ kprobe_ftrace_enabled++;
+ if (kprobe_ftrace_enabled == 1) {
+ ret = register_ftrace_function(&kprobe_ftrace_ops);
+ WARN(ret < 0, "Failed to init kprobe-ftrace (%d)\n", ret);
+ }
+}
+
+/* Caller must lock kprobe_mutex */
+static void __kprobes disarm_kprobe_ftrace(struct kprobe *p)
+{
+ int ret;
+
+ kprobe_ftrace_enabled--;
+ if (kprobe_ftrace_enabled == 0) {
+ ret = unregister_ftrace_function(&kprobe_ftrace_ops);
+ WARN(ret < 0, "Failed to unregister kprobe-ftrace (%d)\n", ret);
+ }
+ ret = ftrace_set_filter_ip(&kprobe_ftrace_ops,
+ (unsigned long)p->addr, 1, 0);
+ WARN(ret < 0, "Failed to disarm kprobe-ftrace at %p (%d)\n", p->addr, ret);
+}
+#else /* !KPROBES_CAN_USE_FTRACE */
+#define prepare_kprobe(p) arch_prepare_kprobe(p)
+#define arm_kprobe_ftrace(p) do {} while (0)
+#define disarm_kprobe_ftrace(p) do {} while (0)
+#endif
+
/* Arm a kprobe with text_mutex */
static void __kprobes arm_kprobe(struct kprobe *kp)
{
+ if (unlikely(kprobe_ftrace(kp))) {
+ arm_kprobe_ftrace(kp);
+ return;
+ }
/*
* Here, since __arm_kprobe() doesn't use stop_machine(),
* this doesn't cause deadlock on text_mutex. So, we don't
@@ -921,11 +988,15 @@ static void __kprobes arm_kprobe(struct kprobe *kp)
}
/* Disarm a kprobe with text_mutex */
-static void __kprobes disarm_kprobe(struct kprobe *kp)
+static void __kprobes disarm_kprobe(struct kprobe *kp, bool reopt)
{
+ if (unlikely(kprobe_ftrace(kp))) {
+ disarm_kprobe_ftrace(kp);
+ return;
+ }
/* Ditto */
mutex_lock(&text_mutex);
- __disarm_kprobe(kp, true);
+ __disarm_kprobe(kp, reopt);
mutex_unlock(&text_mutex);
}
@@ -1144,12 +1215,6 @@ static int __kprobes add_new_kprobe(struct kprobe *ap, struct kprobe *p)
if (p->post_handler && !ap->post_handler)
ap->post_handler = aggr_post_handler;
- if (kprobe_disabled(ap) && !kprobe_disabled(p)) {
- ap->flags &= ~KPROBE_FLAG_DISABLED;
- if (!kprobes_all_disarmed)
- /* Arm the breakpoint again. */
- __arm_kprobe(ap);
- }
return 0;
}
@@ -1189,11 +1254,22 @@ static int __kprobes register_aggr_kprobe(struct kprobe *orig_p,
int ret = 0;
struct kprobe *ap = orig_p;
+ /* For preparing optimization, jump_label_text_reserved() is called */
+ jump_label_lock();
+ /*
+ * Get online CPUs to avoid text_mutex deadlock with stop_machine(),
+ * which is invoked by unoptimize_kprobe() in add_new_kprobe()
+ */
+ get_online_cpus();
+ mutex_lock(&text_mutex);
+
if (!kprobe_aggrprobe(orig_p)) {
/* If orig_p is not an aggr_kprobe, create new aggr_kprobe. */
ap = alloc_aggr_kprobe(orig_p);
- if (!ap)
- return -ENOMEM;
+ if (!ap) {
+ ret = -ENOMEM;
+ goto out;
+ }
init_aggr_kprobe(ap, orig_p);
} else if (kprobe_unused(ap))
/* This probe is going to die. Rescue it */
@@ -1213,7 +1289,7 @@ static int __kprobes register_aggr_kprobe(struct kprobe *orig_p,
* free aggr_probe. It will be used next time, or
* freed by unregister_kprobe.
*/
- return ret;
+ goto out;
/* Prepare optimized instructions if possible. */
prepare_optimized_kprobe(ap);
@@ -1228,7 +1304,20 @@ static int __kprobes register_aggr_kprobe(struct kprobe *orig_p,
/* Copy ap's insn slot to p */
copy_kprobe(ap, p);
- return add_new_kprobe(ap, p);
+ ret = add_new_kprobe(ap, p);
+
+out:
+ mutex_unlock(&text_mutex);
+ put_online_cpus();
+ jump_label_unlock();
+
+ if (ret == 0 && kprobe_disabled(ap) && !kprobe_disabled(p)) {
+ ap->flags &= ~KPROBE_FLAG_DISABLED;
+ if (!kprobes_all_disarmed)
+ /* Arm the breakpoint again. */
+ arm_kprobe(ap);
+ }
+ return ret;
}
static int __kprobes in_kprobes_functions(unsigned long addr)
@@ -1313,71 +1402,96 @@ static inline int check_kprobe_rereg(struct kprobe *p)
return ret;
}
-int __kprobes register_kprobe(struct kprobe *p)
+static __kprobes int check_kprobe_address_safe(struct kprobe *p,
+ struct module **probed_mod)
{
int ret = 0;
- struct kprobe *old_p;
- struct module *probed_mod;
- kprobe_opcode_t *addr;
-
- addr = kprobe_addr(p);
- if (IS_ERR(addr))
- return PTR_ERR(addr);
- p->addr = addr;
+ unsigned long ftrace_addr;
- ret = check_kprobe_rereg(p);
- if (ret)
- return ret;
+ /*
+ * If the address is located on an ftrace nop, set the
+ * breakpoint to the following instruction.
+ */
+ ftrace_addr = ftrace_location((unsigned long)p->addr);
+ if (ftrace_addr) {
+#ifdef KPROBES_CAN_USE_FTRACE
+ /* Given address is not on the instruction boundary */
+ if ((unsigned long)p->addr != ftrace_addr)
+ return -EILSEQ;
+ p->flags |= KPROBE_FLAG_FTRACE;
+#else /* !KPROBES_CAN_USE_FTRACE */
+ return -EINVAL;
+#endif
+ }
jump_label_lock();
preempt_disable();
+
+ /* Ensure it is neither in a reserved area nor outside of kernel text */
if (!kernel_text_address((unsigned long) p->addr) ||
in_kprobes_functions((unsigned long) p->addr) ||
- ftrace_text_reserved(p->addr, p->addr) ||
jump_label_text_reserved(p->addr, p->addr)) {
ret = -EINVAL;
- goto cannot_probe;
+ goto out;
}
- /* User can pass only KPROBE_FLAG_DISABLED to register_kprobe */
- p->flags &= KPROBE_FLAG_DISABLED;
-
- /*
- * Check if are we probing a module.
- */
- probed_mod = __module_text_address((unsigned long) p->addr);
- if (probed_mod) {
- /* Return -ENOENT if fail. */
- ret = -ENOENT;
+ /* Check if we are probing a module */
+ *probed_mod = __module_text_address((unsigned long) p->addr);
+ if (*probed_mod) {
/*
* We must hold a refcount of the probed module while updating
* its code to prohibit unexpected unloading.
*/
- if (unlikely(!try_module_get(probed_mod)))
- goto cannot_probe;
+ if (unlikely(!try_module_get(*probed_mod))) {
+ ret = -ENOENT;
+ goto out;
+ }
/*
* If the module freed .init.text, we couldn't insert
* kprobes in there.
*/
- if (within_module_init((unsigned long)p->addr, probed_mod) &&
- probed_mod->state != MODULE_STATE_COMING) {
- module_put(probed_mod);
- goto cannot_probe;
+ if (within_module_init((unsigned long)p->addr, *probed_mod) &&
+ (*probed_mod)->state != MODULE_STATE_COMING) {
+ module_put(*probed_mod);
+ *probed_mod = NULL;
+ ret = -ENOENT;
}
- /* ret will be updated by following code */
}
+out:
preempt_enable();
jump_label_unlock();
+ return ret;
+}
+
+int __kprobes register_kprobe(struct kprobe *p)
+{
+ int ret;
+ struct kprobe *old_p;
+ struct module *probed_mod;
+ kprobe_opcode_t *addr;
+
+ /* Adjust probe address from symbol */
+ addr = kprobe_addr(p);
+ if (IS_ERR(addr))
+ return PTR_ERR(addr);
+ p->addr = addr;
+
+ ret = check_kprobe_rereg(p);
+ if (ret)
+ return ret;
+
+ /* User can pass only KPROBE_FLAG_DISABLED to register_kprobe */
+ p->flags &= KPROBE_FLAG_DISABLED;
p->nmissed = 0;
INIT_LIST_HEAD(&p->list);
- mutex_lock(&kprobe_mutex);
- jump_label_lock(); /* needed to call jump_label_text_reserved() */
+ ret = check_kprobe_address_safe(p, &probed_mod);
+ if (ret)
+ return ret;
- get_online_cpus(); /* For avoiding text_mutex deadlock. */
- mutex_lock(&text_mutex);
+ mutex_lock(&kprobe_mutex);
old_p = get_kprobe(p->addr);
if (old_p) {
@@ -1386,7 +1500,9 @@ int __kprobes register_kprobe(struct kprobe *p)
goto out;
}
- ret = arch_prepare_kprobe(p);
+ mutex_lock(&text_mutex); /* Avoiding text modification */
+ ret = prepare_kprobe(p);
+ mutex_unlock(&text_mutex);
if (ret)
goto out;
@@ -1395,26 +1511,18 @@ int __kprobes register_kprobe(struct kprobe *p)
&kprobe_table[hash_ptr(p->addr, KPROBE_HASH_BITS)]);
if (!kprobes_all_disarmed && !kprobe_disabled(p))
- __arm_kprobe(p);
+ arm_kprobe(p);
/* Try to optimize kprobe */
try_to_optimize_kprobe(p);
out:
- mutex_unlock(&text_mutex);
- put_online_cpus();
- jump_label_unlock();
mutex_unlock(&kprobe_mutex);
if (probed_mod)
module_put(probed_mod);
return ret;
-
-cannot_probe:
- preempt_enable();
- jump_label_unlock();
- return ret;
}
EXPORT_SYMBOL_GPL(register_kprobe);
@@ -1451,7 +1559,7 @@ static struct kprobe *__kprobes __disable_kprobe(struct kprobe *p)
/* Try to disarm and disable this/parent probe */
if (p == orig_p || aggr_kprobe_disabled(orig_p)) {
- disarm_kprobe(orig_p);
+ disarm_kprobe(orig_p, true);
orig_p->flags |= KPROBE_FLAG_DISABLED;
}
}
@@ -2049,10 +2157,11 @@ static void __kprobes report_probe(struct seq_file *pi, struct kprobe *p,
if (!pp)
pp = p;
- seq_printf(pi, "%s%s%s\n",
+ seq_printf(pi, "%s%s%s%s\n",
(kprobe_gone(p) ? "[GONE]" : ""),
((kprobe_disabled(p) && !kprobe_gone(p)) ? "[DISABLED]" : ""),
- (kprobe_optimized(pp) ? "[OPTIMIZED]" : ""));
+ (kprobe_optimized(pp) ? "[OPTIMIZED]" : ""),
+ (kprobe_ftrace(pp) ? "[FTRACE]" : ""));
}
static void __kprobes *kprobe_seq_start(struct seq_file *f, loff_t *pos)
@@ -2131,14 +2240,12 @@ static void __kprobes arm_all_kprobes(void)
goto already_enabled;
/* Arming kprobes doesn't optimize kprobe itself */
- mutex_lock(&text_mutex);
for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
head = &kprobe_table[i];
hlist_for_each_entry_rcu(p, node, head, hlist)
if (!kprobe_disabled(p))
- __arm_kprobe(p);
+ arm_kprobe(p);
}
- mutex_unlock(&text_mutex);
kprobes_all_disarmed = false;
printk(KERN_INFO "Kprobes globally enabled\n");
@@ -2166,15 +2273,13 @@ static void __kprobes disarm_all_kprobes(void)
kprobes_all_disarmed = true;
printk(KERN_INFO "Kprobes globally disabled\n");
- mutex_lock(&text_mutex);
for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
head = &kprobe_table[i];
hlist_for_each_entry_rcu(p, node, head, hlist) {
if (!arch_trampoline_kprobe(p) && !kprobe_disabled(p))
- __disarm_kprobe(p, false);
+ disarm_kprobe(p, false);
}
}
- mutex_unlock(&text_mutex);
mutex_unlock(&kprobe_mutex);
/* Wait for disarming all kprobes by optimizer */
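
With the report_probe() change above, ftrace-based probes become visible from user space. A hedged sketch that scans the debugfs listing for the new "[FTRACE]" tag (path assumes debugfs is mounted at /sys/kernel/debug):

#include <stdio.h>
#include <string.h>

int main(void)
{
	char line[256];
	FILE *f = fopen("/sys/kernel/debug/kprobes/list", "r");

	if (!f)
		return 1;
	while (fgets(line, sizeof(line), f))
		if (strstr(line, "[FTRACE]"))
			fputs(line, stdout);	/* ftrace-based probe */
	fclose(f);
	return 0;
}
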
diff --git a/kernel/trace/Kconfig b/kernel/trace/Kconfig
index 8c4c07071cc..4cea4f41c1d 100644
--- a/kernel/trace/Kconfig
+++ b/kernel/trace/Kconfig
@@ -49,6 +49,11 @@ config HAVE_SYSCALL_TRACEPOINTS
help
See Documentation/trace/ftrace-design.txt
+config HAVE_FENTRY
+ bool
+ help
+ Arch supports the gcc option -pg together with -mfentry
+
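For context, -pg alone makes gcc emit a call to mcount after each
function prologue, while -pg with -mfentry emits a call to __fentry__
as the very first instruction instead. An illustrative sketch (assumed
compiler output, not part of this patch):

	void foo(void)
	{
		/* compiled with -pg:           compiled with -pg -mfentry:
		 *   push %rbp                    call __fentry__  <- first insn
		 *   mov  %rsp,%rbp               push %rbp
		 *   call mcount                  mov  %rsp,%rbp
		 *   ...                          ...
		 */
	}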
config HAVE_C_RECORDMCOUNT
bool
help
@@ -57,8 +62,12 @@ config HAVE_C_RECORDMCOUNT
config TRACER_MAX_TRACE
bool
+config TRACE_CLOCK
+ bool
+
config RING_BUFFER
bool
+ select TRACE_CLOCK
config FTRACE_NMI_ENTER
bool
@@ -109,6 +118,7 @@ config TRACING
select NOP_TRACER
select BINARY_PRINTF
select EVENT_TRACING
+ select TRACE_CLOCK
config GENERIC_TRACER
bool
diff --git a/kernel/trace/Makefile b/kernel/trace/Makefile
index b831087c820..d7e2068e4b7 100644
--- a/kernel/trace/Makefile
+++ b/kernel/trace/Makefile
@@ -5,10 +5,12 @@ ifdef CONFIG_FUNCTION_TRACER
ORIG_CFLAGS := $(KBUILD_CFLAGS)
KBUILD_CFLAGS = $(subst -pg,,$(ORIG_CFLAGS))
+ifdef CONFIG_FTRACE_SELFTEST
# selftest needs instrumentation
CFLAGS_trace_selftest_dynamic.o = -pg
obj-y += trace_selftest_dynamic.o
endif
+endif
# If unlikely tracing is enabled, do not trace these files
ifdef CONFIG_TRACING_BRANCHES
@@ -17,11 +19,7 @@ endif
CFLAGS_trace_events_filter.o := -I$(src)
-#
-# Make the trace clocks available generally: it's infrastructure
-# relied on by ptrace for example:
-#
-obj-y += trace_clock.o
+obj-$(CONFIG_TRACE_CLOCK) += trace_clock.o
obj-$(CONFIG_FUNCTION_TRACER) += libftrace.o
obj-$(CONFIG_RING_BUFFER) += ring_buffer.o
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index b4f20fba09f..9dcf15d3838 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -64,12 +64,20 @@
#define FL_GLOBAL_CONTROL_MASK (FTRACE_OPS_FL_GLOBAL | FTRACE_OPS_FL_CONTROL)
+static struct ftrace_ops ftrace_list_end __read_mostly = {
+ .func = ftrace_stub,
+ .flags = FTRACE_OPS_FL_RECURSION_SAFE,
+};
+
/* ftrace_enabled is a method to turn ftrace on or off */
int ftrace_enabled __read_mostly;
static int last_ftrace_enabled;
/* Quick disabling of function tracer. */
-int function_trace_stop;
+int function_trace_stop __read_mostly;
+
+/* Current function tracing op */
+struct ftrace_ops *function_trace_op __read_mostly = &ftrace_list_end;
/* List for set_ftrace_pid's pids. */
LIST_HEAD(ftrace_pids);
@@ -86,22 +94,43 @@ static int ftrace_disabled __read_mostly;
static DEFINE_MUTEX(ftrace_lock);
-static struct ftrace_ops ftrace_list_end __read_mostly = {
- .func = ftrace_stub,
-};
-
static struct ftrace_ops *ftrace_global_list __read_mostly = &ftrace_list_end;
static struct ftrace_ops *ftrace_control_list __read_mostly = &ftrace_list_end;
static struct ftrace_ops *ftrace_ops_list __read_mostly = &ftrace_list_end;
ftrace_func_t ftrace_trace_function __read_mostly = ftrace_stub;
-static ftrace_func_t __ftrace_trace_function_delay __read_mostly = ftrace_stub;
-ftrace_func_t __ftrace_trace_function __read_mostly = ftrace_stub;
ftrace_func_t ftrace_pid_function __read_mostly = ftrace_stub;
static struct ftrace_ops global_ops;
static struct ftrace_ops control_ops;
-static void
-ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip);
+#if ARCH_SUPPORTS_FTRACE_OPS
+static void ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip,
+ struct ftrace_ops *op, struct pt_regs *regs);
+#else
+/* See comment below, where ftrace_ops_list_func is defined */
+static void ftrace_ops_no_ops(unsigned long ip, unsigned long parent_ip);
+#define ftrace_ops_list_func ((ftrace_func_t)ftrace_ops_no_ops)
+#endif
+
+/**
+ * ftrace_nr_registered_ops - return number of ops registered
+ *
+ * Returns the number of ftrace_ops registered and tracing functions
+ */
+int ftrace_nr_registered_ops(void)
+{
+ struct ftrace_ops *ops;
+ int cnt = 0;
+
+ mutex_lock(&ftrace_lock);
+
+ for (ops = ftrace_ops_list;
+ ops != &ftrace_list_end; ops = ops->next)
+ cnt++;
+
+ mutex_unlock(&ftrace_lock);
+
+ return cnt;
+}
/*
* Traverse the ftrace_global_list, invoking all entries. The reason that we
@@ -112,29 +141,29 @@ ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip);
*
* Silly Alpha and silly pointer-speculation compiler optimizations!
*/
-static void ftrace_global_list_func(unsigned long ip,
- unsigned long parent_ip)
+static void
+ftrace_global_list_func(unsigned long ip, unsigned long parent_ip,
+ struct ftrace_ops *op, struct pt_regs *regs)
{
- struct ftrace_ops *op;
-
if (unlikely(trace_recursion_test(TRACE_GLOBAL_BIT)))
return;
trace_recursion_set(TRACE_GLOBAL_BIT);
op = rcu_dereference_raw(ftrace_global_list); /*see above*/
while (op != &ftrace_list_end) {
- op->func(ip, parent_ip);
+ op->func(ip, parent_ip, op, regs);
op = rcu_dereference_raw(op->next); /*see above*/
};
trace_recursion_clear(TRACE_GLOBAL_BIT);
}
-static void ftrace_pid_func(unsigned long ip, unsigned long parent_ip)
+static void ftrace_pid_func(unsigned long ip, unsigned long parent_ip,
+ struct ftrace_ops *op, struct pt_regs *regs)
{
if (!test_tsk_trace_trace(current))
return;
- ftrace_pid_function(ip, parent_ip);
+ ftrace_pid_function(ip, parent_ip, op, regs);
}
static void set_ftrace_pid_function(ftrace_func_t func)
@@ -153,25 +182,9 @@ static void set_ftrace_pid_function(ftrace_func_t func)
void clear_ftrace_function(void)
{
ftrace_trace_function = ftrace_stub;
- __ftrace_trace_function = ftrace_stub;
- __ftrace_trace_function_delay = ftrace_stub;
ftrace_pid_function = ftrace_stub;
}
-#ifndef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
-/*
- * For those archs that do not test ftrace_trace_stop in their
- * mcount call site, we need to do it from C.
- */
-static void ftrace_test_stop_func(unsigned long ip, unsigned long parent_ip)
-{
- if (function_trace_stop)
- return;
-
- __ftrace_trace_function(ip, parent_ip);
-}
-#endif
-
static void control_ops_disable_all(struct ftrace_ops *ops)
{
int cpu;
@@ -230,28 +243,27 @@ static void update_ftrace_function(void)
/*
* If we are at the end of the list and this ops is
- * not dynamic, then have the mcount trampoline call
- * the function directly
+ * recursion safe, not dynamic, and the arch supports passing ops,
+ * then have the mcount trampoline call the function directly.
*/
if (ftrace_ops_list == &ftrace_list_end ||
(ftrace_ops_list->next == &ftrace_list_end &&
- !(ftrace_ops_list->flags & FTRACE_OPS_FL_DYNAMIC)))
+ !(ftrace_ops_list->flags & FTRACE_OPS_FL_DYNAMIC) &&
+ (ftrace_ops_list->flags & FTRACE_OPS_FL_RECURSION_SAFE) &&
+ !FTRACE_FORCE_LIST_FUNC)) {
+ /* Set the ftrace_ops that the arch callback uses */
+ if (ftrace_ops_list == &global_ops)
+ function_trace_op = ftrace_global_list;
+ else
+ function_trace_op = ftrace_ops_list;
func = ftrace_ops_list->func;
- else
+ } else {
+ /* Just use the default ftrace_ops */
+ function_trace_op = &ftrace_list_end;
func = ftrace_ops_list_func;
+ }
-#ifdef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
ftrace_trace_function = func;
-#else
-#ifdef CONFIG_DYNAMIC_FTRACE
- /* do not update till all functions have been modified */
- __ftrace_trace_function_delay = func;
-#else
- __ftrace_trace_function = func;
-#endif
- ftrace_trace_function =
- (func == ftrace_stub) ? func : ftrace_test_stop_func;
-#endif
}
static void add_ftrace_ops(struct ftrace_ops **list, struct ftrace_ops *ops)
@@ -325,6 +337,20 @@ static int __register_ftrace_function(struct ftrace_ops *ops)
if ((ops->flags & FL_GLOBAL_CONTROL_MASK) == FL_GLOBAL_CONTROL_MASK)
return -EINVAL;
+#ifndef ARCH_SUPPORTS_FTRACE_SAVE_REGS
+ /*
+ * If the ftrace_ops specifies SAVE_REGS, then it only can be used
+ * if the arch supports it, or SAVE_REGS_IF_SUPPORTED is also set.
+ * Setting SAVE_REGS_IF_SUPPORTED makes SAVE_REGS irrelevant.
+ */
+ if (ops->flags & FTRACE_OPS_FL_SAVE_REGS &&
+ !(ops->flags & FTRACE_OPS_FL_SAVE_REGS_IF_SUPPORTED))
+ return -EINVAL;
+
+ if (ops->flags & FTRACE_OPS_FL_SAVE_REGS_IF_SUPPORTED)
+ ops->flags |= FTRACE_OPS_FL_SAVE_REGS;
+#endif
+
if (!core_kernel_data((unsigned long)ops))
ops->flags |= FTRACE_OPS_FL_DYNAMIC;
@@ -773,7 +799,8 @@ ftrace_profile_alloc(struct ftrace_profile_stat *stat, unsigned long ip)
}
static void
-function_profile_call(unsigned long ip, unsigned long parent_ip)
+function_profile_call(unsigned long ip, unsigned long parent_ip,
+ struct ftrace_ops *ops, struct pt_regs *regs)
{
struct ftrace_profile_stat *stat;
struct ftrace_profile *rec;
@@ -803,7 +830,7 @@ function_profile_call(unsigned long ip, unsigned long parent_ip)
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
static int profile_graph_entry(struct ftrace_graph_ent *trace)
{
- function_profile_call(trace->func, 0);
+ function_profile_call(trace->func, 0, NULL, NULL);
return 1;
}
@@ -863,6 +890,7 @@ static void unregister_ftrace_profiler(void)
#else
static struct ftrace_ops ftrace_profile_ops __read_mostly = {
.func = function_profile_call,
+ .flags = FTRACE_OPS_FL_RECURSION_SAFE,
};
static int register_ftrace_profiler(void)
@@ -1045,6 +1073,7 @@ static struct ftrace_ops global_ops = {
.func = ftrace_stub,
.notrace_hash = EMPTY_HASH,
.filter_hash = EMPTY_HASH,
+ .flags = FTRACE_OPS_FL_RECURSION_SAFE,
};
static DEFINE_MUTEX(ftrace_regex_lock);
@@ -1525,6 +1554,12 @@ static void __ftrace_hash_rec_update(struct ftrace_ops *ops,
rec->flags++;
if (FTRACE_WARN_ON((rec->flags & ~FTRACE_FL_MASK) == FTRACE_REF_MAX))
return;
+ /*
+ * If any ops wants regs saved for this function
+ * then all ops will get saved regs.
+ */
+ if (ops->flags & FTRACE_OPS_FL_SAVE_REGS)
+ rec->flags |= FTRACE_FL_REGS;
} else {
if (FTRACE_WARN_ON((rec->flags & ~FTRACE_FL_MASK) == 0))
return;
@@ -1616,18 +1651,59 @@ static int ftrace_check_record(struct dyn_ftrace *rec, int enable, int update)
if (enable && (rec->flags & ~FTRACE_FL_MASK))
flag = FTRACE_FL_ENABLED;
+ /*
+ * If enabling and the REGS flag does not match the REGS_EN, then
+ * do not ignore this record. Set flags to fail the compare against
+ * ENABLED.
+ */
+ if (flag &&
+ (!(rec->flags & FTRACE_FL_REGS) != !(rec->flags & FTRACE_FL_REGS_EN)))
+ flag |= FTRACE_FL_REGS;
+
/* If the state of this record hasn't changed, then do nothing */
if ((rec->flags & FTRACE_FL_ENABLED) == flag)
return FTRACE_UPDATE_IGNORE;
if (flag) {
- if (update)
+ /* Save off if rec is being enabled (for return value) */
+ flag ^= rec->flags & FTRACE_FL_ENABLED;
+
+ if (update) {
rec->flags |= FTRACE_FL_ENABLED;
- return FTRACE_UPDATE_MAKE_CALL;
+ if (flag & FTRACE_FL_REGS) {
+ if (rec->flags & FTRACE_FL_REGS)
+ rec->flags |= FTRACE_FL_REGS_EN;
+ else
+ rec->flags &= ~FTRACE_FL_REGS_EN;
+ }
+ }
+
+ /*
+ * If this record is being updated from a nop, then
+ * return UPDATE_MAKE_CALL.
+ * Otherwise, if the EN flag is set, return
+ * UPDATE_MODIFY_CALL_REGS to tell the caller to convert
+ * the call from the non-save-regs to the save-regs function.
+ * Otherwise, return UPDATE_MODIFY_CALL to tell the caller to
+ * convert from the save-regs to the non-save-regs function.
+ */
+ if (flag & FTRACE_FL_ENABLED)
+ return FTRACE_UPDATE_MAKE_CALL;
+ else if (rec->flags & FTRACE_FL_REGS_EN)
+ return FTRACE_UPDATE_MODIFY_CALL_REGS;
+ else
+ return FTRACE_UPDATE_MODIFY_CALL;
}
- if (update)
- rec->flags &= ~FTRACE_FL_ENABLED;
+ if (update) {
+ /* If there are no more users, clear all flags */
+ if (!(rec->flags & ~FTRACE_FL_MASK))
+ rec->flags = 0;
+ else
+ /* Just disable the record (keep REGS state) */
+ rec->flags &= ~FTRACE_FL_ENABLED;
+ }
return FTRACE_UPDATE_MAKE_NOP;
}
@@ -1662,13 +1738,17 @@ int ftrace_test_record(struct dyn_ftrace *rec, int enable)
static int
__ftrace_replace_code(struct dyn_ftrace *rec, int enable)
{
+ unsigned long ftrace_old_addr;
unsigned long ftrace_addr;
int ret;
- ftrace_addr = (unsigned long)FTRACE_ADDR;
-
ret = ftrace_update_record(rec, enable);
+ if (rec->flags & FTRACE_FL_REGS)
+ ftrace_addr = (unsigned long)FTRACE_REGS_ADDR;
+ else
+ ftrace_addr = (unsigned long)FTRACE_ADDR;
+
switch (ret) {
case FTRACE_UPDATE_IGNORE:
return 0;
@@ -1678,6 +1758,15 @@ __ftrace_replace_code(struct dyn_ftrace *rec, int enable)
case FTRACE_UPDATE_MAKE_NOP:
return ftrace_make_nop(NULL, rec, ftrace_addr);
+
+ case FTRACE_UPDATE_MODIFY_CALL_REGS:
+ case FTRACE_UPDATE_MODIFY_CALL:
+ if (rec->flags & FTRACE_FL_REGS)
+ ftrace_old_addr = (unsigned long)FTRACE_ADDR;
+ else
+ ftrace_old_addr = (unsigned long)FTRACE_REGS_ADDR;
+
+ return ftrace_modify_call(rec, ftrace_old_addr, ftrace_addr);
}
return -1; /* unknown ftrace bug */
@@ -1882,16 +1971,6 @@ static void ftrace_run_update_code(int command)
*/
arch_ftrace_update_code(command);
-#ifndef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
- /*
- * For archs that call ftrace_test_stop_func(), we must
- * wait till after we update all the function callers
- * before we update the callback. This keeps different
- * ops that record different functions from corrupting
- * each other.
- */
- __ftrace_trace_function = __ftrace_trace_function_delay;
-#endif
function_trace_stop--;
ret = ftrace_arch_code_modify_post_process();
@@ -2441,8 +2520,9 @@ static int t_show(struct seq_file *m, void *v)
seq_printf(m, "%ps", (void *)rec->ip);
if (iter->flags & FTRACE_ITER_ENABLED)
- seq_printf(m, " (%ld)",
- rec->flags & ~FTRACE_FL_MASK);
+ seq_printf(m, " (%ld)%s",
+ rec->flags & ~FTRACE_FL_MASK,
+ rec->flags & FTRACE_FL_REGS ? " R" : "");
seq_printf(m, "\n");
return 0;
@@ -2790,8 +2870,8 @@ static int __init ftrace_mod_cmd_init(void)
}
device_initcall(ftrace_mod_cmd_init);
-static void
-function_trace_probe_call(unsigned long ip, unsigned long parent_ip)
+static void function_trace_probe_call(unsigned long ip, unsigned long parent_ip,
+ struct ftrace_ops *op, struct pt_regs *pt_regs)
{
struct ftrace_func_probe *entry;
struct hlist_head *hhd;
@@ -3162,8 +3242,27 @@ ftrace_notrace_write(struct file *file, const char __user *ubuf,
}
static int
-ftrace_set_regex(struct ftrace_ops *ops, unsigned char *buf, int len,
- int reset, int enable)
+ftrace_match_addr(struct ftrace_hash *hash, unsigned long ip, int remove)
+{
+ struct ftrace_func_entry *entry;
+
+ if (!ftrace_location(ip))
+ return -EINVAL;
+
+ if (remove) {
+ entry = ftrace_lookup_ip(hash, ip);
+ if (!entry)
+ return -ENOENT;
+ free_hash_entry(hash, entry);
+ return 0;
+ }
+
+ return add_hash_entry(hash, ip);
+}
+
+static int
+ftrace_set_hash(struct ftrace_ops *ops, unsigned char *buf, int len,
+ unsigned long ip, int remove, int reset, int enable)
{
struct ftrace_hash **orig_hash;
struct ftrace_hash *hash;
@@ -3192,6 +3291,11 @@ ftrace_set_regex(struct ftrace_ops *ops, unsigned char *buf, int len,
ret = -EINVAL;
goto out_regex_unlock;
}
+ if (ip) {
+ ret = ftrace_match_addr(hash, ip, remove);
+ if (ret < 0)
+ goto out_regex_unlock;
+ }
mutex_lock(&ftrace_lock);
ret = ftrace_hash_move(ops, enable, orig_hash, hash);
@@ -3208,6 +3312,37 @@ ftrace_set_regex(struct ftrace_ops *ops, unsigned char *buf, int len,
return ret;
}
+static int
+ftrace_set_addr(struct ftrace_ops *ops, unsigned long ip, int remove,
+ int reset, int enable)
+{
+ return ftrace_set_hash(ops, 0, 0, ip, remove, reset, enable);
+}
+
+/**
+ * ftrace_set_filter_ip - set a function to filter on in ftrace by address
+ * @ops - the ops to set the filter with
+ * @ip - the address to add to or remove from the filter.
+ * @remove - non-zero to remove the ip from the filter
+ * @reset - non-zero to reset all filters before applying this filter.
+ *
+ * Filters denote which functions should be enabled when tracing is enabled.
+ * If @ip is 0, updating the filter fails.
+ */
+int ftrace_set_filter_ip(struct ftrace_ops *ops, unsigned long ip,
+ int remove, int reset)
+{
+ return ftrace_set_addr(ops, ip, remove, reset, 1);
+}
+EXPORT_SYMBOL_GPL(ftrace_set_filter_ip);
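A short sketch of how a client drives this new entry point (kprobes is
the intended user; the callback body and the traced symbol below are
assumptions for illustration):

	#include <linux/ftrace.h>
	#include <linux/kallsyms.h>
	#include <linux/errno.h>

	static void my_tracer(unsigned long ip, unsigned long parent_ip,
			      struct ftrace_ops *op, struct pt_regs *regs)
	{
		/* regs is NULL unless SAVE_REGS was requested and
		 * the arch can provide it */
	}

	static struct ftrace_ops my_ops = {
		.func	= my_tracer,
		.flags	= FTRACE_OPS_FL_RECURSION_SAFE,
	};

	static int trace_one_function(void)
	{
		unsigned long ip = kallsyms_lookup_name("do_fork"); /* assumed */
		int ret;

		if (!ip)
			return -ENOENT;
		/* add (remove=0) this ip, resetting any old filter (reset=1) */
		ret = ftrace_set_filter_ip(&my_ops, ip, 0, 1);
		if (ret)
			return ret;
		return register_ftrace_function(&my_ops);
	}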
+
+static int
+ftrace_set_regex(struct ftrace_ops *ops, unsigned char *buf, int len,
+ int reset, int enable)
+{
+ return ftrace_set_hash(ops, buf, len, 0, 0, reset, enable);
+}
+
/**
* ftrace_set_filter - set a function to filter on in ftrace
* @ops - the ops to set the filter with
@@ -3912,6 +4047,7 @@ void __init ftrace_init(void)
static struct ftrace_ops global_ops = {
.func = ftrace_stub,
+ .flags = FTRACE_OPS_FL_RECURSION_SAFE,
};
static int __init ftrace_nodyn_init(void)
@@ -3942,10 +4078,9 @@ ftrace_ops_test(struct ftrace_ops *ops, unsigned long ip)
#endif /* CONFIG_DYNAMIC_FTRACE */
static void
-ftrace_ops_control_func(unsigned long ip, unsigned long parent_ip)
+ftrace_ops_control_func(unsigned long ip, unsigned long parent_ip,
+ struct ftrace_ops *op, struct pt_regs *regs)
{
- struct ftrace_ops *op;
-
if (unlikely(trace_recursion_test(TRACE_CONTROL_BIT)))
return;
@@ -3959,7 +4094,7 @@ ftrace_ops_control_func(unsigned long ip, unsigned long parent_ip)
while (op != &ftrace_list_end) {
if (!ftrace_function_local_disabled(op) &&
ftrace_ops_test(op, ip))
- op->func(ip, parent_ip);
+ op->func(ip, parent_ip, op, regs);
op = rcu_dereference_raw(op->next);
};
@@ -3969,13 +4104,18 @@ ftrace_ops_control_func(unsigned long ip, unsigned long parent_ip)
static struct ftrace_ops control_ops = {
.func = ftrace_ops_control_func,
+ .flags = FTRACE_OPS_FL_RECURSION_SAFE,
};
-static void
-ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip)
+static inline void
+__ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip,
+ struct ftrace_ops *ignored, struct pt_regs *regs)
{
struct ftrace_ops *op;
+ if (function_trace_stop)
+ return;
+
if (unlikely(trace_recursion_test(TRACE_INTERNAL_BIT)))
return;
@@ -3988,13 +4128,39 @@ ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip)
op = rcu_dereference_raw(ftrace_ops_list);
while (op != &ftrace_list_end) {
if (ftrace_ops_test(op, ip))
- op->func(ip, parent_ip);
+ op->func(ip, parent_ip, op, regs);
op = rcu_dereference_raw(op->next);
};
preempt_enable_notrace();
trace_recursion_clear(TRACE_INTERNAL_BIT);
}
+/*
+ * Some archs only support passing ip and parent_ip. Even though
+ * the list function ignores the op parameter, we do not want any
+ * C side effects, where a function is called without the caller
+ * sending a third parameter.
+ * Archs are expected to support both regs and ftrace_ops at the same time.
+ * If they support ftrace_ops, it is assumed they support regs.
+ * If callbacks want to use regs, they must either check for regs
+ * being NULL, or check ARCH_SUPPORTS_FTRACE_SAVE_REGS.
+ * Note, ARCH_SUPPORTS_FTRACE_SAVE_REGS expects a full pt_regs to be saved.
+ * An architecture can pass partial regs with ftrace_ops and still
+ * set ARCH_SUPPORTS_FTRACE_OPS.
+ */
+#if ARCH_SUPPORTS_FTRACE_OPS
+static void ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip,
+ struct ftrace_ops *op, struct pt_regs *regs)
+{
+ __ftrace_ops_list_func(ip, parent_ip, NULL, regs);
+}
+#else
+static void ftrace_ops_no_ops(unsigned long ip, unsigned long parent_ip)
+{
+ __ftrace_ops_list_func(ip, parent_ip, NULL, NULL);
+}
+#endif
+
static void clear_ftrace_swapper(void)
{
struct task_struct *p;
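Putting the comment above into practice: a callback that wants registers
when available but must still load on arches without support would look
roughly like this (a sketch; the names and the printout are ours):

	static void my_regs_cb(unsigned long ip, unsigned long parent_ip,
			       struct ftrace_ops *op, struct pt_regs *regs)
	{
		/* per the rules above, regs may be NULL, so always check */
		if (regs)
			pr_info("traced %pS sp=%lx\n", (void *)ip,
				kernel_stack_pointer(regs));
	}

	static struct ftrace_ops my_regs_ops = {
		.func	= my_regs_cb,
		.flags	= FTRACE_OPS_FL_RECURSION_SAFE |
			  FTRACE_OPS_FL_SAVE_REGS_IF_SUPPORTED,
	};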
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
index 49491fa7daa..b32ed0e385a 100644
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -2816,7 +2816,7 @@ EXPORT_SYMBOL_GPL(ring_buffer_record_enable);
* to the buffer after this will fail and return NULL.
*
* This is different than ring_buffer_record_disable() as
- * it works like an on/off switch, where as the disable() verison
+ * it works like an on/off switch, where as the disable() version
* must be paired with a enable().
*/
void ring_buffer_record_off(struct ring_buffer *buffer)
@@ -2839,7 +2839,7 @@ EXPORT_SYMBOL_GPL(ring_buffer_record_off);
* ring_buffer_record_off().
*
* This is different than ring_buffer_record_enable() as
- * it works like an on/off switch, where as the enable() verison
+ * it works like an on/off switch, where as the enable() version
* must be paired with a disable().
*/
void ring_buffer_record_on(struct ring_buffer *buffer)
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 5c38c81496c..1ec5c1dab62 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -328,7 +328,7 @@ static DECLARE_WAIT_QUEUE_HEAD(trace_wait);
unsigned long trace_flags = TRACE_ITER_PRINT_PARENT | TRACE_ITER_PRINTK |
TRACE_ITER_ANNOTATE | TRACE_ITER_CONTEXT_INFO | TRACE_ITER_SLEEP_TIME |
TRACE_ITER_GRAPH_TIME | TRACE_ITER_RECORD_CMD | TRACE_ITER_OVERWRITE |
- TRACE_ITER_IRQ_INFO;
+ TRACE_ITER_IRQ_INFO | TRACE_ITER_MARKERS;
static int trace_stop_count;
static DEFINE_RAW_SPINLOCK(tracing_start_lock);
@@ -426,15 +426,15 @@ __setup("trace_buf_size=", set_buf_size);
static int __init set_tracing_thresh(char *str)
{
- unsigned long threshhold;
+ unsigned long threshold;
int ret;
if (!str)
return 0;
- ret = strict_strtoul(str, 0, &threshhold);
+ ret = strict_strtoul(str, 0, &threshold);
if (ret < 0)
return 0;
- tracing_thresh = threshhold * 1000;
+ tracing_thresh = threshold * 1000;
return 1;
}
__setup("tracing_thresh=", set_tracing_thresh);
@@ -470,6 +470,7 @@ static const char *trace_options[] = {
"overwrite",
"disable_on_free",
"irq-info",
+ "markers",
NULL
};
@@ -3886,6 +3887,9 @@ tracing_mark_write(struct file *filp, const char __user *ubuf,
if (tracing_disabled)
return -EINVAL;
+ if (!(trace_flags & TRACE_ITER_MARKERS))
+ return -EINVAL;
+
if (cnt > TRACE_BUF_SIZE)
cnt = TRACE_BUF_SIZE;
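A userspace sketch of the behaviour this adds: once the new "markers"
option is cleared, writes to trace_marker fail with EINVAL (paths assume
debugfs mounted in the usual place):

	#include <errno.h>
	#include <fcntl.h>
	#include <stdio.h>
	#include <unistd.h>

	int main(void)
	{
		int fd = open("/sys/kernel/debug/tracing/trace_marker",
			      O_WRONLY);

		if (fd < 0)
			return 1;
		/* fails with EINVAL after "echo 0 > options/markers" */
		if (write(fd, "hello\n", 6) < 0 && errno == EINVAL)
			fprintf(stderr, "markers option is off\n");
		close(fd);
		return 0;
	}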
diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h
index 55e1f7f0db1..63a2da0b9a6 100644
--- a/kernel/trace/trace.h
+++ b/kernel/trace/trace.h
@@ -472,11 +472,11 @@ extern void trace_find_cmdline(int pid, char comm[]);
#ifdef CONFIG_DYNAMIC_FTRACE
extern unsigned long ftrace_update_tot_cnt;
+#endif
#define DYN_FTRACE_TEST_NAME trace_selftest_dynamic_test_func
extern int DYN_FTRACE_TEST_NAME(void);
#define DYN_FTRACE_TEST_NAME2 trace_selftest_dynamic_test_func2
extern int DYN_FTRACE_TEST_NAME2(void);
-#endif
extern int ring_buffer_expanded;
extern bool tracing_selftest_disabled;
@@ -680,6 +680,7 @@ enum trace_iterator_flags {
TRACE_ITER_OVERWRITE = 0x200000,
TRACE_ITER_STOP_ON_FREE = 0x400000,
TRACE_ITER_IRQ_INFO = 0x800000,
+ TRACE_ITER_MARKERS = 0x1000000,
};
/*
diff --git a/kernel/trace/trace_event_perf.c b/kernel/trace/trace_event_perf.c
index 8a6d2ee2086..84b1e045fab 100644
--- a/kernel/trace/trace_event_perf.c
+++ b/kernel/trace/trace_event_perf.c
@@ -258,7 +258,8 @@ EXPORT_SYMBOL_GPL(perf_trace_buf_prepare);
#ifdef CONFIG_FUNCTION_TRACER
static void
-perf_ftrace_function_call(unsigned long ip, unsigned long parent_ip)
+perf_ftrace_function_call(unsigned long ip, unsigned long parent_ip,
+ struct ftrace_ops *ops, struct pt_regs *pt_regs)
{
struct ftrace_entry *entry;
struct hlist_head *head;
diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
index 29111da1d10..d608d09d08c 100644
--- a/kernel/trace/trace_events.c
+++ b/kernel/trace/trace_events.c
@@ -1199,6 +1199,31 @@ event_create_dir(struct ftrace_event_call *call, struct dentry *d_events,
return 0;
}
+static void event_remove(struct ftrace_event_call *call)
+{
+ ftrace_event_enable_disable(call, 0);
+ if (call->event.funcs)
+ __unregister_ftrace_event(&call->event);
+ list_del(&call->list);
+}
+
+static int event_init(struct ftrace_event_call *call)
+{
+ int ret = 0;
+
+ if (WARN_ON(!call->name))
+ return -EINVAL;
+
+ if (call->class->raw_init) {
+ ret = call->class->raw_init(call);
+ if (ret < 0 && ret != -ENOSYS)
+ pr_warn("Could not initialize trace events/%s\n",
+ call->name);
+ }
+
+ return ret;
+}
+
static int
__trace_add_event_call(struct ftrace_event_call *call, struct module *mod,
const struct file_operations *id,
@@ -1209,19 +1234,9 @@ __trace_add_event_call(struct ftrace_event_call *call, struct module *mod,
struct dentry *d_events;
int ret;
- /* The linker may leave blanks */
- if (!call->name)
- return -EINVAL;
-
- if (call->class->raw_init) {
- ret = call->class->raw_init(call);
- if (ret < 0) {
- if (ret != -ENOSYS)
- pr_warning("Could not initialize trace events/%s\n",
- call->name);
- return ret;
- }
- }
+ ret = event_init(call);
+ if (ret < 0)
+ return ret;
d_events = event_trace_events_dir();
if (!d_events)
@@ -1272,13 +1287,10 @@ static void remove_subsystem_dir(const char *name)
*/
static void __trace_remove_event_call(struct ftrace_event_call *call)
{
- ftrace_event_enable_disable(call, 0);
- if (call->event.funcs)
- __unregister_ftrace_event(&call->event);
- debugfs_remove_recursive(call->dir);
- list_del(&call->list);
+ event_remove(call);
trace_destroy_fields(call);
destroy_preds(call);
+ debugfs_remove_recursive(call->dir);
remove_subsystem_dir(call->class->system);
}
@@ -1450,15 +1462,43 @@ static __init int setup_trace_event(char *str)
}
__setup("trace_event=", setup_trace_event);
+static __init int event_trace_enable(void)
+{
+ struct ftrace_event_call **iter, *call;
+ char *buf = bootup_event_buf;
+ char *token;
+ int ret;
+
+ for_each_event(iter, __start_ftrace_events, __stop_ftrace_events) {
+
+ call = *iter;
+ ret = event_init(call);
+ if (!ret)
+ list_add(&call->list, &ftrace_events);
+ }
+
+ while (true) {
+ token = strsep(&buf, ",");
+
+ if (!token)
+ break;
+ if (!*token)
+ continue;
+
+ ret = ftrace_set_clr_event(token, 1);
+ if (ret)
+ pr_warn("Failed to enable trace event: %s\n", token);
+ }
+ return 0;
+}
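The comma-separated list consumed above comes from the trace_event=
boot parameter registered just before this function, e.g. (event names
assumed for illustration):

	trace_event=sched:sched_switch,irq:irq_handler_entry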
+
static __init int event_trace_init(void)
{
- struct ftrace_event_call **call;
+ struct ftrace_event_call *call;
struct dentry *d_tracer;
struct dentry *entry;
struct dentry *d_events;
int ret;
- char *buf = bootup_event_buf;
- char *token;
d_tracer = tracing_init_dentry();
if (!d_tracer)
@@ -1497,24 +1537,19 @@ static __init int event_trace_init(void)
if (trace_define_common_fields())
pr_warning("tracing: Failed to allocate common fields");
- for_each_event(call, __start_ftrace_events, __stop_ftrace_events) {
- __trace_add_event_call(*call, NULL, &ftrace_event_id_fops,
+ /*
+ * Early initialization already enabled the ftrace events.
+ * Now we only need to create each event's directory.
+ */
+ list_for_each_entry(call, &ftrace_events, list) {
+
+ ret = event_create_dir(call, d_events,
+ &ftrace_event_id_fops,
&ftrace_enable_fops,
&ftrace_event_filter_fops,
&ftrace_event_format_fops);
- }
-
- while (true) {
- token = strsep(&buf, ",");
-
- if (!token)
- break;
- if (!*token)
- continue;
-
- ret = ftrace_set_clr_event(token, 1);
- if (ret)
- pr_warning("Failed to enable trace event: %s\n", token);
+ if (ret < 0)
+ event_remove(call);
}
ret = register_module_notifier(&trace_module_nb);
@@ -1523,6 +1558,7 @@ static __init int event_trace_init(void)
return 0;
}
+core_initcall(event_trace_enable);
fs_initcall(event_trace_init);
#ifdef CONFIG_FTRACE_STARTUP_TEST
@@ -1646,9 +1682,11 @@ static __init void event_trace_self_tests(void)
event_test_stuff();
ret = __ftrace_set_clr_event(NULL, system->name, NULL, 0);
- if (WARN_ON_ONCE(ret))
+ if (WARN_ON_ONCE(ret)) {
pr_warning("error disabling system %s\n",
system->name);
+ continue;
+ }
pr_cont("OK\n");
}
@@ -1681,7 +1719,8 @@ static __init void event_trace_self_tests(void)
static DEFINE_PER_CPU(atomic_t, ftrace_test_event_disable);
static void
-function_test_events_call(unsigned long ip, unsigned long parent_ip)
+function_test_events_call(unsigned long ip, unsigned long parent_ip,
+ struct ftrace_ops *op, struct pt_regs *pt_regs)
{
struct ring_buffer_event *event;
struct ring_buffer *buffer;
@@ -1720,6 +1759,7 @@ function_test_events_call(unsigned long ip, unsigned long parent_ip)
static struct ftrace_ops trace_ops __initdata =
{
.func = function_test_events_call,
+ .flags = FTRACE_OPS_FL_RECURSION_SAFE,
};
static __init void event_trace_self_test_with_function(void)
diff --git a/kernel/trace/trace_events_filter.c b/kernel/trace/trace_events_filter.c
index 431dba8b754..c154797a7ff 100644
--- a/kernel/trace/trace_events_filter.c
+++ b/kernel/trace/trace_events_filter.c
@@ -2002,7 +2002,7 @@ static int ftrace_function_set_regexp(struct ftrace_ops *ops, int filter,
static int __ftrace_function_set_filter(int filter, char *buf, int len,
struct function_filter_data *data)
{
- int i, re_cnt, ret;
+ int i, re_cnt, ret = -EINVAL;
int *reset;
char **re;
diff --git a/kernel/trace/trace_functions.c b/kernel/trace/trace_functions.c
index a426f410c06..483162a9f90 100644
--- a/kernel/trace/trace_functions.c
+++ b/kernel/trace/trace_functions.c
@@ -49,7 +49,8 @@ static void function_trace_start(struct trace_array *tr)
}
static void
-function_trace_call_preempt_only(unsigned long ip, unsigned long parent_ip)
+function_trace_call_preempt_only(unsigned long ip, unsigned long parent_ip,
+ struct ftrace_ops *op, struct pt_regs *pt_regs)
{
struct trace_array *tr = func_trace;
struct trace_array_cpu *data;
@@ -84,7 +85,9 @@ enum {
static struct tracer_flags func_flags;
static void
-function_trace_call(unsigned long ip, unsigned long parent_ip)
+function_trace_call(unsigned long ip, unsigned long parent_ip,
+ struct ftrace_ops *op, struct pt_regs *pt_regs)
+
{
struct trace_array *tr = func_trace;
struct trace_array_cpu *data;
@@ -121,7 +124,8 @@ function_trace_call(unsigned long ip, unsigned long parent_ip)
}
static void
-function_stack_trace_call(unsigned long ip, unsigned long parent_ip)
+function_stack_trace_call(unsigned long ip, unsigned long parent_ip,
+ struct ftrace_ops *op, struct pt_regs *pt_regs)
{
struct trace_array *tr = func_trace;
struct trace_array_cpu *data;
@@ -164,13 +168,13 @@ function_stack_trace_call(unsigned long ip, unsigned long parent_ip)
static struct ftrace_ops trace_ops __read_mostly =
{
.func = function_trace_call,
- .flags = FTRACE_OPS_FL_GLOBAL,
+ .flags = FTRACE_OPS_FL_GLOBAL | FTRACE_OPS_FL_RECURSION_SAFE,
};
static struct ftrace_ops trace_stack_ops __read_mostly =
{
.func = function_stack_trace_call,
- .flags = FTRACE_OPS_FL_GLOBAL,
+ .flags = FTRACE_OPS_FL_GLOBAL | FTRACE_OPS_FL_RECURSION_SAFE,
};
static struct tracer_opt func_opts[] = {
diff --git a/kernel/trace/trace_functions_graph.c b/kernel/trace/trace_functions_graph.c
index ce27c8ba8d3..99b4378393d 100644
--- a/kernel/trace/trace_functions_graph.c
+++ b/kernel/trace/trace_functions_graph.c
@@ -143,7 +143,7 @@ ftrace_pop_return_trace(struct ftrace_graph_ret *trace, unsigned long *ret,
return;
}
-#ifdef CONFIG_HAVE_FUNCTION_GRAPH_FP_TEST
+#if defined(CONFIG_HAVE_FUNCTION_GRAPH_FP_TEST) && !defined(CC_USING_FENTRY)
/*
* The arch may choose to record the frame pointer used
* and check it here to make sure that it is what we expect it
@@ -154,6 +154,9 @@ ftrace_pop_return_trace(struct ftrace_graph_ret *trace, unsigned long *ret,
*
* Currently, x86_32 with optimize for size (-Os) makes the latest
* gcc do the above.
+ *
+ * Note, -mfentry does not use frame pointers, and this test
+ * is not needed if CC_USING_FENTRY is set.
*/
if (unlikely(current->ret_stack[index].fp != frame_pointer)) {
ftrace_graph_stop();
diff --git a/kernel/trace/trace_irqsoff.c b/kernel/trace/trace_irqsoff.c
index 99d20e92036..d98ee8283b2 100644
--- a/kernel/trace/trace_irqsoff.c
+++ b/kernel/trace/trace_irqsoff.c
@@ -136,7 +136,8 @@ static int func_prolog_dec(struct trace_array *tr,
* irqsoff uses its own tracer function to keep the overhead down:
*/
static void
-irqsoff_tracer_call(unsigned long ip, unsigned long parent_ip)
+irqsoff_tracer_call(unsigned long ip, unsigned long parent_ip,
+ struct ftrace_ops *op, struct pt_regs *pt_regs)
{
struct trace_array *tr = irqsoff_trace;
struct trace_array_cpu *data;
@@ -153,7 +154,7 @@ irqsoff_tracer_call(unsigned long ip, unsigned long parent_ip)
static struct ftrace_ops trace_ops __read_mostly =
{
.func = irqsoff_tracer_call,
- .flags = FTRACE_OPS_FL_GLOBAL,
+ .flags = FTRACE_OPS_FL_GLOBAL | FTRACE_OPS_FL_RECURSION_SAFE,
};
#endif /* CONFIG_FUNCTION_TRACER */
diff --git a/kernel/trace/trace_sched_wakeup.c b/kernel/trace/trace_sched_wakeup.c
index ff791ea48b5..02170c00c41 100644
--- a/kernel/trace/trace_sched_wakeup.c
+++ b/kernel/trace/trace_sched_wakeup.c
@@ -108,7 +108,8 @@ out_enable:
* wakeup uses its own tracer function to keep the overhead down:
*/
static void
-wakeup_tracer_call(unsigned long ip, unsigned long parent_ip)
+wakeup_tracer_call(unsigned long ip, unsigned long parent_ip,
+ struct ftrace_ops *op, struct pt_regs *pt_regs)
{
struct trace_array *tr = wakeup_trace;
struct trace_array_cpu *data;
@@ -129,7 +130,7 @@ wakeup_tracer_call(unsigned long ip, unsigned long parent_ip)
static struct ftrace_ops trace_ops __read_mostly =
{
.func = wakeup_tracer_call,
- .flags = FTRACE_OPS_FL_GLOBAL,
+ .flags = FTRACE_OPS_FL_GLOBAL | FTRACE_OPS_FL_RECURSION_SAFE,
};
#endif /* CONFIG_FUNCTION_TRACER */
diff --git a/kernel/trace/trace_selftest.c b/kernel/trace/trace_selftest.c
index 288541f977f..2c00a691a54 100644
--- a/kernel/trace/trace_selftest.c
+++ b/kernel/trace/trace_selftest.c
@@ -103,54 +103,67 @@ static inline void warn_failed_init_tracer(struct tracer *trace, int init_ret)
static int trace_selftest_test_probe1_cnt;
static void trace_selftest_test_probe1_func(unsigned long ip,
- unsigned long pip)
+ unsigned long pip,
+ struct ftrace_ops *op,
+ struct pt_regs *pt_regs)
{
trace_selftest_test_probe1_cnt++;
}
static int trace_selftest_test_probe2_cnt;
static void trace_selftest_test_probe2_func(unsigned long ip,
- unsigned long pip)
+ unsigned long pip,
+ struct ftrace_ops *op,
+ struct pt_regs *pt_regs)
{
trace_selftest_test_probe2_cnt++;
}
static int trace_selftest_test_probe3_cnt;
static void trace_selftest_test_probe3_func(unsigned long ip,
- unsigned long pip)
+ unsigned long pip,
+ struct ftrace_ops *op,
+ struct pt_regs *pt_regs)
{
trace_selftest_test_probe3_cnt++;
}
static int trace_selftest_test_global_cnt;
static void trace_selftest_test_global_func(unsigned long ip,
- unsigned long pip)
+ unsigned long pip,
+ struct ftrace_ops *op,
+ struct pt_regs *pt_regs)
{
trace_selftest_test_global_cnt++;
}
static int trace_selftest_test_dyn_cnt;
static void trace_selftest_test_dyn_func(unsigned long ip,
- unsigned long pip)
+ unsigned long pip,
+ struct ftrace_ops *op,
+ struct pt_regs *pt_regs)
{
trace_selftest_test_dyn_cnt++;
}
static struct ftrace_ops test_probe1 = {
.func = trace_selftest_test_probe1_func,
+ .flags = FTRACE_OPS_FL_RECURSION_SAFE,
};
static struct ftrace_ops test_probe2 = {
.func = trace_selftest_test_probe2_func,
+ .flags = FTRACE_OPS_FL_RECURSION_SAFE,
};
static struct ftrace_ops test_probe3 = {
.func = trace_selftest_test_probe3_func,
+ .flags = FTRACE_OPS_FL_RECURSION_SAFE,
};
static struct ftrace_ops test_global = {
- .func = trace_selftest_test_global_func,
- .flags = FTRACE_OPS_FL_GLOBAL,
+ .func = trace_selftest_test_global_func,
+ .flags = FTRACE_OPS_FL_GLOBAL | FTRACE_OPS_FL_RECURSION_SAFE,
};
static void print_counts(void)
@@ -393,10 +406,253 @@ int trace_selftest_startup_dynamic_tracing(struct tracer *trace,
return ret;
}
+
+static int trace_selftest_recursion_cnt;
+static void trace_selftest_test_recursion_func(unsigned long ip,
+ unsigned long pip,
+ struct ftrace_ops *op,
+ struct pt_regs *pt_regs)
+{
+ /*
+ * This function is registered without the recursion safe flag.
+ * The ftrace infrastructure should provide the recursion
+ * protection. If not, this will crash the kernel!
+ */
+ trace_selftest_recursion_cnt++;
+ DYN_FTRACE_TEST_NAME();
+}
+
+static void trace_selftest_test_recursion_safe_func(unsigned long ip,
+ unsigned long pip,
+ struct ftrace_ops *op,
+ struct pt_regs *pt_regs)
+{
+ /*
+ * We said we would provide our own recursion. By calling
+ * this function again, we should recurse back into this function
+ * and count again. But this only happens if the arch supports
+ * all of ftrace features and nothing else is using the function
+ * tracing utility.
+ */
+ if (trace_selftest_recursion_cnt++)
+ return;
+ DYN_FTRACE_TEST_NAME();
+}
+
+static struct ftrace_ops test_rec_probe = {
+ .func = trace_selftest_test_recursion_func,
+};
+
+static struct ftrace_ops test_recsafe_probe = {
+ .func = trace_selftest_test_recursion_safe_func,
+ .flags = FTRACE_OPS_FL_RECURSION_SAFE,
+};
+
+static int
+trace_selftest_function_recursion(void)
+{
+ int save_ftrace_enabled = ftrace_enabled;
+ int save_tracer_enabled = tracer_enabled;
+ char *func_name;
+ int len;
+ int ret;
+ int cnt;
+
+ /* The previous test PASSED */
+ pr_cont("PASSED\n");
+ pr_info("Testing ftrace recursion: ");
+
+
+ /* enable tracing, and record the filter function */
+ ftrace_enabled = 1;
+ tracer_enabled = 1;
+
+ /* Handle PPC64 '.' name */
+ func_name = "*" __stringify(DYN_FTRACE_TEST_NAME);
+ len = strlen(func_name);
+
+ ret = ftrace_set_filter(&test_rec_probe, func_name, len, 1);
+ if (ret) {
+ pr_cont("*Could not set filter* ");
+ goto out;
+ }
+
+ ret = register_ftrace_function(&test_rec_probe);
+ if (ret) {
+ pr_cont("*could not register callback* ");
+ goto out;
+ }
+
+ DYN_FTRACE_TEST_NAME();
+
+ unregister_ftrace_function(&test_rec_probe);
+
+ ret = -1;
+ if (trace_selftest_recursion_cnt != 1) {
+ pr_cont("*callback not called once (%d)* ",
+ trace_selftest_recursion_cnt);
+ goto out;
+ }
+
+ trace_selftest_recursion_cnt = 1;
+
+ pr_cont("PASSED\n");
+ pr_info("Testing ftrace recursion safe: ");
+
+ ret = ftrace_set_filter(&test_recsafe_probe, func_name, len, 1);
+ if (ret) {
+ pr_cont("*Could not set filter* ");
+ goto out;
+ }
+
+ ret = register_ftrace_function(&test_recsafe_probe);
+ if (ret) {
+ pr_cont("*could not register callback* ");
+ goto out;
+ }
+
+ DYN_FTRACE_TEST_NAME();
+
+ unregister_ftrace_function(&test_recsafe_probe);
+
+ /*
+ * If arch supports all ftrace features, and no other task
+ * was on the list, we should be fine.
+ */
+ if (!ftrace_nr_registered_ops() && !FTRACE_FORCE_LIST_FUNC)
+ cnt = 2; /* Should have recursed */
+ else
+ cnt = 1;
+
+ ret = -1;
+ if (trace_selftest_recursion_cnt != cnt) {
+ pr_cont("*callback not called expected %d times (%d)* ",
+ cnt, trace_selftest_recursion_cnt);
+ goto out;
+ }
+
+ ret = 0;
+out:
+ ftrace_enabled = save_ftrace_enabled;
+ tracer_enabled = save_tracer_enabled;
+
+ return ret;
+}
#else
# define trace_selftest_startup_dynamic_tracing(trace, tr, func) ({ 0; })
+# define trace_selftest_function_recursion() ({ 0; })
#endif /* CONFIG_DYNAMIC_FTRACE */
+static enum {
+ TRACE_SELFTEST_REGS_START,
+ TRACE_SELFTEST_REGS_FOUND,
+ TRACE_SELFTEST_REGS_NOT_FOUND,
+} trace_selftest_regs_stat;
+
+static void trace_selftest_test_regs_func(unsigned long ip,
+ unsigned long pip,
+ struct ftrace_ops *op,
+ struct pt_regs *pt_regs)
+{
+ if (pt_regs)
+ trace_selftest_regs_stat = TRACE_SELFTEST_REGS_FOUND;
+ else
+ trace_selftest_regs_stat = TRACE_SELFTEST_REGS_NOT_FOUND;
+}
+
+static struct ftrace_ops test_regs_probe = {
+ .func = trace_selftest_test_regs_func,
+ .flags = FTRACE_OPS_FL_RECURSION_SAFE | FTRACE_OPS_FL_SAVE_REGS,
+};
+
+static int
+trace_selftest_function_regs(void)
+{
+ int save_ftrace_enabled = ftrace_enabled;
+ int save_tracer_enabled = tracer_enabled;
+ char *func_name;
+ int len;
+ int ret;
+ int supported = 0;
+
+#ifdef ARCH_SUPPORTS_FTRACE_SAVE_REGS
+ supported = 1;
+#endif
+
+ /* The previous test PASSED */
+ pr_cont("PASSED\n");
+ pr_info("Testing ftrace regs%s: ",
+ !supported ? "(no arch support)" : "");
+
+ /* enable tracing, and record the filter function */
+ ftrace_enabled = 1;
+ tracer_enabled = 1;
+
+ /* Handle PPC64 '.' name */
+ func_name = "*" __stringify(DYN_FTRACE_TEST_NAME);
+ len = strlen(func_name);
+
+ ret = ftrace_set_filter(&test_regs_probe, func_name, len, 1);
+ /*
+ * If DYNAMIC_FTRACE is not set, then we just trace all functions.
+ * This test really doesn't care.
+ */
+ if (ret && ret != -ENODEV) {
+ pr_cont("*Could not set filter* ");
+ goto out;
+ }
+
+ ret = register_ftrace_function(&test_regs_probe);
+ /*
+ * Now if the arch does not support passing regs, then this should
+ * have failed.
+ */
+ if (!supported) {
+ if (!ret) {
+ pr_cont("*registered save-regs without arch support* ");
+ goto out;
+ }
+ test_regs_probe.flags |= FTRACE_OPS_FL_SAVE_REGS_IF_SUPPORTED;
+ ret = register_ftrace_function(&test_regs_probe);
+ }
+ if (ret) {
+ pr_cont("*could not register callback* ");
+ goto out;
+ }
+
+
+ DYN_FTRACE_TEST_NAME();
+
+ unregister_ftrace_function(&test_regs_probe);
+
+ ret = -1;
+
+ switch (trace_selftest_regs_stat) {
+ case TRACE_SELFTEST_REGS_START:
+ pr_cont("*callback never called* ");
+ goto out;
+
+ case TRACE_SELFTEST_REGS_FOUND:
+ if (supported)
+ break;
+ pr_cont("*callback received regs without arch support* ");
+ goto out;
+
+ case TRACE_SELFTEST_REGS_NOT_FOUND:
+ if (!supported)
+ break;
+ pr_cont("*callback received NULL regs* ");
+ goto out;
+ }
+
+ ret = 0;
+out:
+ ftrace_enabled = save_ftrace_enabled;
+ tracer_enabled = save_tracer_enabled;
+
+ return ret;
+}
+
/*
* Simple verification test of ftrace function tracer.
* Enable ftrace, sleep 1/10 second, and then read the trace
@@ -442,7 +698,14 @@ trace_selftest_startup_function(struct tracer *trace, struct trace_array *tr)
ret = trace_selftest_startup_dynamic_tracing(trace, tr,
DYN_FTRACE_TEST_NAME);
+ if (ret)
+ goto out;
+ ret = trace_selftest_function_recursion();
+ if (ret)
+ goto out;
+
+ ret = trace_selftest_function_regs();
out:
ftrace_enabled = save_ftrace_enabled;
tracer_enabled = save_tracer_enabled;
@@ -778,6 +1041,8 @@ static int trace_wakeup_test_thread(void *data)
set_current_state(TASK_INTERRUPTIBLE);
schedule();
+ complete(x);
+
/* we are awake, now wait to disappear */
while (!kthread_should_stop()) {
/*
@@ -821,24 +1086,21 @@ trace_selftest_startup_wakeup(struct tracer *trace, struct trace_array *tr)
/* reset the max latency */
tracing_max_latency = 0;
- /* sleep to let the RT thread sleep too */
- msleep(100);
+ while (p->on_rq) {
+ /*
+ * Sleep to make sure the RT thread is asleep too.
+ * On virtual machines we can't rely on timings,
+ * but we want to make sure this test still works.
+ */
+ msleep(100);
+ }
- /*
- * Yes this is slightly racy. It is possible that for some
- * strange reason that the RT thread we created, did not
- * call schedule for 100ms after doing the completion,
- * and we do a wakeup on a task that already is awake.
- * But that is extremely unlikely, and the worst thing that
- * happens in such a case, is that we disable tracing.
- * Honestly, if this race does happen something is horrible
- * wrong with the system.
- */
+ init_completion(&isrt);
wake_up_process(p);
- /* give a little time to let the thread wake up */
- msleep(100);
+ /* Wait for the task to wake up */
+ wait_for_completion(&isrt);
/* stop the tracing. */
tracing_stop();
diff --git a/kernel/trace/trace_stack.c b/kernel/trace/trace_stack.c
index d4545f49242..0c1b165778e 100644
--- a/kernel/trace/trace_stack.c
+++ b/kernel/trace/trace_stack.c
@@ -111,7 +111,8 @@ static inline void check_stack(void)
}
static void
-stack_trace_call(unsigned long ip, unsigned long parent_ip)
+stack_trace_call(unsigned long ip, unsigned long parent_ip,
+ struct ftrace_ops *op, struct pt_regs *pt_regs)
{
int cpu;
@@ -136,6 +137,7 @@ stack_trace_call(unsigned long ip, unsigned long parent_ip)
static struct ftrace_ops trace_ops __read_mostly =
{
.func = stack_trace_call,
+ .flags = FTRACE_OPS_FL_RECURSION_SAFE,
};
static ssize_t
diff --git a/kernel/trace/trace_syscalls.c b/kernel/trace/trace_syscalls.c
index 6b245f64c8d..2485a7d09b1 100644
--- a/kernel/trace/trace_syscalls.c
+++ b/kernel/trace/trace_syscalls.c
@@ -487,7 +487,7 @@ int __init init_ftrace_syscalls(void)
return 0;
}
-core_initcall(init_ftrace_syscalls);
+early_initcall(init_ftrace_syscalls);
#ifdef CONFIG_PERF_EVENTS
diff --git a/scripts/recordmcount.h b/scripts/recordmcount.h
index 54e35c1e594..9d1421e63ff 100644
--- a/scripts/recordmcount.h
+++ b/scripts/recordmcount.h
@@ -261,11 +261,13 @@ static unsigned get_mcountsym(Elf_Sym const *const sym0,
&sym0[Elf_r_sym(relp)];
char const *symname = &str0[w(symp->st_name)];
char const *mcount = gpfx == '_' ? "_mcount" : "mcount";
+ char const *fentry = "__fentry__";
if (symname[0] == '.')
++symname; /* ppc64 hack */
if (strcmp(mcount, symname) == 0 ||
- (altmcount && strcmp(altmcount, symname) == 0))
+ (altmcount && strcmp(altmcount, symname) == 0) ||
+ (strcmp(fentry, symname) == 0))
mcountsym = Elf_r_sym(relp);
return mcountsym;
diff --git a/tools/lib/traceevent/Makefile b/tools/lib/traceevent/Makefile
index 14131cb0522..04d959fa022 100644
--- a/tools/lib/traceevent/Makefile
+++ b/tools/lib/traceevent/Makefile
@@ -129,7 +129,7 @@ CFLAGS ?= -g -Wall
# Append required CFLAGS
override CFLAGS += $(CONFIG_FLAGS) $(INCLUDES) $(PLUGIN_DIR_SQ)
-override CFLAGS += $(udis86-flags)
+override CFLAGS += $(udis86-flags) -D_GNU_SOURCE
ifeq ($(VERBOSE),1)
Q =
diff --git a/tools/lib/traceevent/event-parse.c b/tools/lib/traceevent/event-parse.c
index 5f34aa371b5..47264b4652b 100644
--- a/tools/lib/traceevent/event-parse.c
+++ b/tools/lib/traceevent/event-parse.c
@@ -24,13 +24,14 @@
* Frederic Weisbecker gave his permission to relicense the code to
* the Lesser General Public License.
*/
-#define _GNU_SOURCE
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <stdarg.h>
#include <ctype.h>
#include <errno.h>
+#include <stdint.h>
+#include <limits.h>
#include "event-parse.h"
#include "event-utils.h"
@@ -117,14 +118,7 @@ void breakpoint(void)
struct print_arg *alloc_arg(void)
{
- struct print_arg *arg;
-
- arg = malloc_or_die(sizeof(*arg));
- if (!arg)
- return NULL;
- memset(arg, 0, sizeof(*arg));
-
- return arg;
+ return calloc(1, sizeof(struct print_arg));
}
struct cmdline {
@@ -158,7 +152,9 @@ static int cmdline_init(struct pevent *pevent)
struct cmdline *cmdlines;
int i;
- cmdlines = malloc_or_die(sizeof(*cmdlines) * pevent->cmdline_count);
+ cmdlines = malloc(sizeof(*cmdlines) * pevent->cmdline_count);
+ if (!cmdlines)
+ return -1;
i = 0;
while (cmdlist) {
@@ -186,8 +182,8 @@ static char *find_cmdline(struct pevent *pevent, int pid)
if (!pid)
return "<idle>";
- if (!pevent->cmdlines)
- cmdline_init(pevent);
+ if (!pevent->cmdlines && cmdline_init(pevent))
+ return "<not enough memory for cmdlines!>";
key.pid = pid;
@@ -215,8 +211,8 @@ int pevent_pid_is_registered(struct pevent *pevent, int pid)
if (!pid)
return 1;
- if (!pevent->cmdlines)
- cmdline_init(pevent);
+ if (!pevent->cmdlines && cmdline_init(pevent))
+ return 0;
key.pid = pid;
@@ -258,10 +254,14 @@ static int add_new_comm(struct pevent *pevent, const char *comm, int pid)
return -1;
}
- cmdlines[pevent->cmdline_count].pid = pid;
cmdlines[pevent->cmdline_count].comm = strdup(comm);
- if (!cmdlines[pevent->cmdline_count].comm)
- die("malloc comm");
+ if (!cmdlines[pevent->cmdline_count].comm) {
+ free(cmdlines);
+ errno = ENOMEM;
+ return -1;
+ }
+
+ cmdlines[pevent->cmdline_count].pid = pid;
if (cmdlines[pevent->cmdline_count].comm)
pevent->cmdline_count++;
@@ -288,10 +288,15 @@ int pevent_register_comm(struct pevent *pevent, const char *comm, int pid)
if (pevent->cmdlines)
return add_new_comm(pevent, comm, pid);
- item = malloc_or_die(sizeof(*item));
+ item = malloc(sizeof(*item));
+ if (!item)
+ return -1;
+
item->comm = strdup(comm);
- if (!item->comm)
- die("malloc comm");
+ if (!item->comm) {
+ free(item);
+ return -1;
+ }
item->pid = pid;
item->next = pevent->cmdlist;
@@ -355,7 +360,10 @@ static int func_map_init(struct pevent *pevent)
struct func_map *func_map;
int i;
- func_map = malloc_or_die(sizeof(*func_map) * (pevent->func_count + 1));
+ func_map = malloc(sizeof(*func_map) * (pevent->func_count + 1));
+ if (!func_map)
+ return -1;
+
funclist = pevent->funclist;
i = 0;
@@ -455,25 +463,36 @@ pevent_find_function_address(struct pevent *pevent, unsigned long long addr)
int pevent_register_function(struct pevent *pevent, char *func,
unsigned long long addr, char *mod)
{
- struct func_list *item;
+ struct func_list *item = malloc(sizeof(*item));
- item = malloc_or_die(sizeof(*item));
+ if (!item)
+ return -1;
item->next = pevent->funclist;
item->func = strdup(func);
- if (mod)
+ if (!item->func)
+ goto out_free;
+
+ if (mod) {
item->mod = strdup(mod);
- else
+ if (!item->mod)
+ goto out_free_func;
+ } else
item->mod = NULL;
item->addr = addr;
- if (!item->func || (mod && !item->mod))
- die("malloc func");
-
pevent->funclist = item;
pevent->func_count++;
return 0;
+
+out_free_func:
+ free(item->func);
+ item->func = NULL;
+out_free:
+ free(item);
+ errno = ENOMEM;
+ return -1;
}
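With malloc_or_die()/die() gone from this path, callers are expected to
check the return value and errno rather than rely on the library
aborting; a caller sketch (names are illustrative):

	#include <errno.h>
	#include <stdio.h>
	#include <string.h>
	#include "event-parse.h"

	static int register_one(struct pevent *pevent, char *name,
				unsigned long long addr, char *mod)
	{
		if (pevent_register_function(pevent, name, addr, mod) < 0) {
			fprintf(stderr, "cannot register %s: %s\n",
				name, strerror(errno));
			return -1;
		}
		return 0;
	}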
/**
@@ -524,14 +543,16 @@ static int printk_cmp(const void *a, const void *b)
return 0;
}
-static void printk_map_init(struct pevent *pevent)
+static int printk_map_init(struct pevent *pevent)
{
struct printk_list *printklist;
struct printk_list *item;
struct printk_map *printk_map;
int i;
- printk_map = malloc_or_die(sizeof(*printk_map) * (pevent->printk_count + 1));
+ printk_map = malloc(sizeof(*printk_map) * (pevent->printk_count + 1));
+ if (!printk_map)
+ return -1;
printklist = pevent->printklist;
@@ -549,6 +570,8 @@ static void printk_map_init(struct pevent *pevent)
pevent->printk_map = printk_map;
pevent->printklist = NULL;
+
+ return 0;
}
static struct printk_map *
@@ -557,8 +580,8 @@ find_printk(struct pevent *pevent, unsigned long long addr)
struct printk_map *printk;
struct printk_map key;
- if (!pevent->printk_map)
- printk_map_init(pevent);
+ if (!pevent->printk_map && printk_map_init(pevent))
+ return NULL;
key.addr = addr;
@@ -580,21 +603,27 @@ find_printk(struct pevent *pevent, unsigned long long addr)
int pevent_register_print_string(struct pevent *pevent, char *fmt,
unsigned long long addr)
{
- struct printk_list *item;
+ struct printk_list *item = malloc(sizeof(*item));
- item = malloc_or_die(sizeof(*item));
+ if (!item)
+ return -1;
item->next = pevent->printklist;
- item->printk = strdup(fmt);
item->addr = addr;
+ item->printk = strdup(fmt);
if (!item->printk)
- die("malloc fmt");
+ goto out_free;
pevent->printklist = item;
pevent->printk_count++;
return 0;
+
+out_free:
+ free(item);
+ errno = ENOMEM;
+ return -1;
}
/**
@@ -619,24 +648,18 @@ void pevent_print_printk(struct pevent *pevent)
static struct event_format *alloc_event(void)
{
- struct event_format *event;
-
- event = malloc(sizeof(*event));
- if (!event)
- return NULL;
- memset(event, 0, sizeof(*event));
-
- return event;
+ return calloc(1, sizeof(struct event_format));
}
-static void add_event(struct pevent *pevent, struct event_format *event)
+static int add_event(struct pevent *pevent, struct event_format *event)
{
int i;
+ struct event_format **events = realloc(pevent->events, sizeof(event) *
+ (pevent->nr_events + 1));
+ if (!events)
+ return -1;
- pevent->events = realloc(pevent->events, sizeof(event) *
- (pevent->nr_events + 1));
- if (!pevent->events)
- die("Can not allocate events");
+ pevent->events = events;
for (i = 0; i < pevent->nr_events; i++) {
if (pevent->events[i]->id > event->id)
@@ -651,6 +674,8 @@ static void add_event(struct pevent *pevent, struct event_format *event)
pevent->nr_events++;
event->pevent = pevent;
+
+ return 0;
}
static int event_item_type(enum event_type type)
@@ -827,9 +852,9 @@ static enum event_type __read_token(char **tok)
switch (type) {
case EVENT_NEWLINE:
case EVENT_DELIM:
- *tok = malloc_or_die(2);
- (*tok)[0] = ch;
- (*tok)[1] = 0;
+ if (asprintf(tok, "%c", ch) < 0)
+ return EVENT_ERROR;
+
return type;
case EVENT_OP:
@@ -1240,8 +1265,10 @@ static int event_read_fields(struct event_format *event, struct format_field **f
last_token = token;
- field = malloc_or_die(sizeof(*field));
- memset(field, 0, sizeof(*field));
+ field = calloc(1, sizeof(*field));
+ if (!field)
+ goto fail;
+
field->event = event;
/* read the rest of the type */
@@ -1282,7 +1309,7 @@ static int event_read_fields(struct event_format *event, struct format_field **f
}
if (!field->type) {
- die("no type found");
+ do_warning("%s: no type found", __func__);
goto fail;
}
field->name = last_token;
@@ -1329,7 +1356,7 @@ static int event_read_fields(struct event_format *event, struct format_field **f
free_token(token);
type = read_token(&token);
if (type == EVENT_NONE) {
- die("failed to find token");
+ do_warning("failed to find token");
goto fail;
}
}
@@ -1538,6 +1565,14 @@ process_cond(struct event_format *event, struct print_arg *top, char **tok)
left = alloc_arg();
right = alloc_arg();
+ if (!arg || !left || !right) {
+ do_warning("%s: not enough memory!", __func__);
+ /* arg will be freed at out_free */
+ free_arg(left);
+ free_arg(right);
+ goto out_free;
+ }
+
arg->type = PRINT_OP;
arg->op.left = left;
arg->op.right = right;
@@ -1580,6 +1615,12 @@ process_array(struct event_format *event, struct print_arg *top, char **tok)
char *token = NULL;
arg = alloc_arg();
+ if (!arg) {
+ do_warning("%s: not enough memory!", __func__);
+ /* '*tok' is set to top->op.op. No need to free. */
+ *tok = NULL;
+ return EVENT_ERROR;
+ }
*tok = NULL;
type = process_arg(event, arg, &token);
@@ -1595,8 +1636,7 @@ process_array(struct event_format *event, struct print_arg *top, char **tok)
return type;
out_free:
- free_token(*tok);
- *tok = NULL;
+ free_token(token);
free_arg(arg);
return EVENT_ERROR;
}
@@ -1682,7 +1722,7 @@ process_op(struct event_format *event, struct print_arg *arg, char **tok)
if (arg->type == PRINT_OP && !arg->op.left) {
/* handle single op */
if (token[1]) {
- die("bad op token %s", token);
+ do_warning("bad op token %s", token);
goto out_free;
}
switch (token[0]) {
@@ -1699,10 +1739,16 @@ process_op(struct event_format *event, struct print_arg *arg, char **tok)
/* make an empty left */
left = alloc_arg();
+ if (!left)
+ goto out_warn_free;
+
left->type = PRINT_NULL;
arg->op.left = left;
right = alloc_arg();
+ if (!right)
+ goto out_warn_free;
+
arg->op.right = right;
/* do not free the token, it belongs to an op */
@@ -1712,6 +1758,9 @@ process_op(struct event_format *event, struct print_arg *arg, char **tok)
} else if (strcmp(token, "?") == 0) {
left = alloc_arg();
+ if (!left)
+ goto out_warn_free;
+
/* copy the top arg to the left */
*left = *arg;
@@ -1720,6 +1769,7 @@ process_op(struct event_format *event, struct print_arg *arg, char **tok)
arg->op.left = left;
arg->op.prio = 0;
+ /* it will set arg->op.right */
type = process_cond(event, arg, tok);
} else if (strcmp(token, ">>") == 0 ||
@@ -1739,6 +1789,8 @@ process_op(struct event_format *event, struct print_arg *arg, char **tok)
strcmp(token, "!=") == 0) {
left = alloc_arg();
+ if (!left)
+ goto out_warn_free;
/* copy the top arg to the left */
*left = *arg;
@@ -1746,6 +1798,7 @@ process_op(struct event_format *event, struct print_arg *arg, char **tok)
arg->type = PRINT_OP;
arg->op.op = token;
arg->op.left = left;
+ arg->op.right = NULL;
if (set_op_prio(arg) == -1) {
event->flags |= EVENT_FL_FAILED;
@@ -1762,12 +1815,14 @@ process_op(struct event_format *event, struct print_arg *arg, char **tok)
type == EVENT_DELIM && (strcmp(token, ")") == 0)) {
char *new_atom;
- if (left->type != PRINT_ATOM)
- die("bad pointer type");
+ if (left->type != PRINT_ATOM) {
+ do_warning("bad pointer type");
+ goto out_free;
+ }
new_atom = realloc(left->atom.atom,
strlen(left->atom.atom) + 3);
if (!new_atom)
- goto out_free;
+ goto out_warn_free;
left->atom.atom = new_atom;
strcat(left->atom.atom, " *");
@@ -1779,12 +1834,18 @@ process_op(struct event_format *event, struct print_arg *arg, char **tok)
}
right = alloc_arg();
+ if (!right)
+ goto out_warn_free;
+
type = process_arg_token(event, right, tok, type);
arg->op.right = right;
} else if (strcmp(token, "[") == 0) {
left = alloc_arg();
+ if (!left)
+ goto out_warn_free;
+
*left = *arg;
arg->type = PRINT_OP;
@@ -1793,6 +1854,7 @@ process_op(struct event_format *event, struct print_arg *arg, char **tok)
arg->op.prio = 0;
+ /* it will set arg->op.right */
type = process_array(event, arg, tok);
} else {
@@ -1816,14 +1878,16 @@ process_op(struct event_format *event, struct print_arg *arg, char **tok)
return type;
- out_free:
+out_warn_free:
+ do_warning("%s: not enough memory!", __func__);
+out_free:
free_token(token);
*tok = NULL;
return EVENT_ERROR;
}
static enum event_type
-process_entry(struct event_format *event __unused, struct print_arg *arg,
+process_entry(struct event_format *event __maybe_unused, struct print_arg *arg,
char **tok)
{
enum event_type type;
@@ -1880,7 +1944,11 @@ eval_type_str(unsigned long long val, const char *type, int pointer)
return val;
}
- ref = malloc_or_die(len);
+ ref = malloc(len);
+ if (!ref) {
+ do_warning("%s: not enough memory!", __func__);
+ return val;
+ }
memcpy(ref, type, len);
/* chop off the " *" */
@@ -1957,8 +2025,10 @@ eval_type_str(unsigned long long val, const char *type, int pointer)
static unsigned long long
eval_type(unsigned long long val, struct print_arg *arg, int pointer)
{
- if (arg->type != PRINT_TYPE)
- die("expected type argument");
+ if (arg->type != PRINT_TYPE) {
+ do_warning("expected type argument");
+ return 0;
+ }
return eval_type_str(val, arg->typecast.type, pointer);
}
@@ -2143,7 +2213,7 @@ static char *arg_eval (struct print_arg *arg)
case PRINT_STRING:
case PRINT_BSTRING:
default:
- die("invalid eval type %d", arg->type);
+ do_warning("invalid eval type %d", arg->type);
break;
}
@@ -2166,6 +2236,8 @@ process_fields(struct event_format *event, struct print_flag_sym **list, char **
break;
arg = alloc_arg();
+ if (!arg)
+ goto out_free;
free_token(token);
type = process_arg(event, arg, &token);
@@ -2179,30 +2251,33 @@ process_fields(struct event_format *event, struct print_flag_sym **list, char **
if (test_type_token(type, token, EVENT_DELIM, ","))
goto out_free;
- field = malloc_or_die(sizeof(*field));
- memset(field, 0, sizeof(*field));
+ field = calloc(1, sizeof(*field));
+ if (!field)
+ goto out_free;
value = arg_eval(arg);
if (value == NULL)
- goto out_free;
+ goto out_free_field;
field->value = strdup(value);
if (field->value == NULL)
- goto out_free;
+ goto out_free_field;
free_arg(arg);
arg = alloc_arg();
+ if (!arg)
+ goto out_free;
free_token(token);
type = process_arg(event, arg, &token);
if (test_type_token(type, token, EVENT_OP, "}"))
- goto out_free;
+ goto out_free_field;
value = arg_eval(arg);
if (value == NULL)
- goto out_free;
+ goto out_free_field;
field->str = strdup(value);
if (field->str == NULL)
- goto out_free;
+ goto out_free_field;
free_arg(arg);
arg = NULL;
@@ -2216,6 +2291,8 @@ process_fields(struct event_format *event, struct print_flag_sym **list, char **
*tok = token;
return type;
+out_free_field:
+ free_flag_sym(field);
out_free:
free_arg(arg);
free_token(token);
@@ -2235,6 +2312,10 @@ process_flags(struct event_format *event, struct print_arg *arg, char **tok)
arg->type = PRINT_FLAGS;
field = alloc_arg();
+ if (!field) {
+ do_warning("%s: not enough memory!", __func__);
+ goto out_free;
+ }
type = process_arg(event, field, &token);
@@ -2243,7 +2324,7 @@ process_flags(struct event_format *event, struct print_arg *arg, char **tok)
type = process_op(event, field, &token);
if (test_type_token(type, token, EVENT_DELIM, ","))
- goto out_free;
+ goto out_free_field;
free_token(token);
arg->flags.field = field;
@@ -2265,7 +2346,9 @@ process_flags(struct event_format *event, struct print_arg *arg, char **tok)
type = read_token_item(tok);
return type;
- out_free:
+out_free_field:
+ free_arg(field);
+out_free:
free_token(token);
*tok = NULL;
return EVENT_ERROR;
@@ -2282,10 +2365,14 @@ process_symbols(struct event_format *event, struct print_arg *arg, char **tok)
arg->type = PRINT_SYMBOL;
field = alloc_arg();
+ if (!field) {
+ do_warning("%s: not enough memory!", __func__);
+ goto out_free;
+ }
type = process_arg(event, field, &token);
if (test_type_token(type, token, EVENT_DELIM, ","))
- goto out_free;
+ goto out_free_field;
arg->symbol.field = field;
@@ -2297,7 +2384,9 @@ process_symbols(struct event_format *event, struct print_arg *arg, char **tok)
type = read_token_item(tok);
return type;
- out_free:
+out_free_field:
+ free_arg(field);
+out_free:
free_token(token);
*tok = NULL;
return EVENT_ERROR;
@@ -2314,6 +2403,11 @@ process_hex(struct event_format *event, struct print_arg *arg, char **tok)
arg->type = PRINT_HEX;
field = alloc_arg();
+ if (!field) {
+ do_warning("%s: not enough memory!", __func__);
+ goto out_free;
+ }
+
type = process_arg(event, field, &token);
if (test_type_token(type, token, EVENT_DELIM, ","))
@@ -2324,6 +2418,12 @@ process_hex(struct event_format *event, struct print_arg *arg, char **tok)
free_token(token);
field = alloc_arg();
+ if (!field) {
+ do_warning("%s: not enough memory!", __func__);
+ *tok = NULL;
+ return EVENT_ERROR;
+ }
+
type = process_arg(event, field, &token);
if (test_type_token(type, token, EVENT_DELIM, ")"))
@@ -2381,6 +2481,12 @@ process_dynamic_array(struct event_format *event, struct print_arg *arg, char **
free_token(token);
arg = alloc_arg();
+ if (!arg) {
+ do_warning("%s: not enough memory!", __func__);
+ *tok = NULL;
+ return EVENT_ERROR;
+ }
+
type = process_arg(event, arg, &token);
if (type == EVENT_ERROR)
goto out_free_arg;
@@ -2434,10 +2540,16 @@ process_paren(struct event_format *event, struct print_arg *arg, char **tok)
/* make this a typecast and continue */
/* previous must be an atom */
- if (arg->type != PRINT_ATOM)
- die("previous needed to be PRINT_ATOM");
+ if (arg->type != PRINT_ATOM) {
+ do_warning("previous needed to be PRINT_ATOM");
+ goto out_free;
+ }
item_arg = alloc_arg();
+ if (!item_arg) {
+ do_warning("%s: not enough memory!", __func__);
+ goto out_free;
+ }
arg->type = PRINT_TYPE;
arg->typecast.type = arg->atom.atom;
@@ -2457,7 +2569,8 @@ process_paren(struct event_format *event, struct print_arg *arg, char **tok)
static enum event_type
-process_str(struct event_format *event __unused, struct print_arg *arg, char **tok)
+process_str(struct event_format *event __maybe_unused, struct print_arg *arg,
+ char **tok)
{
enum event_type type;
char *token;
@@ -2532,6 +2645,11 @@ process_func_handler(struct event_format *event, struct pevent_function_handler
next_arg = &(arg->func.args);
for (i = 0; i < func->nr_args; i++) {
farg = alloc_arg();
+ if (!farg) {
+ do_warning("%s: not enough memory!", __func__);
+ return EVENT_ERROR;
+ }
+
type = process_arg(event, farg, &token);
if (i < (func->nr_args - 1))
test = ",";
@@ -2676,7 +2794,8 @@ process_arg_token(struct event_format *event, struct print_arg *arg,
case EVENT_ERROR ... EVENT_NEWLINE:
default:
- die("unexpected type %d", type);
+ do_warning("unexpected type %d", type);
+ return EVENT_ERROR;
}
*tok = token;
@@ -2697,6 +2816,10 @@ static int event_read_print_args(struct event_format *event, struct print_arg **
}
arg = alloc_arg();
+ if (!arg) {
+ do_warning("%s: not enough memory!", __func__);
+ return -1;
+ }
type = process_arg(event, arg, &token);
@@ -2768,10 +2891,8 @@ static int event_read_print(struct event_format *event)
if (type == EVENT_DQUOTE) {
char *cat;
- cat = malloc_or_die(strlen(event->print_fmt.format) +
- strlen(token) + 1);
- strcpy(cat, event->print_fmt.format);
- strcat(cat, token);
+ if (asprintf(&cat, "%s%s", event->print_fmt.format, token) < 0)
+ goto fail;
free_token(token);
free_token(event->print_fmt.format);
event->print_fmt.format = NULL;
@@ -2925,8 +3046,10 @@ static int get_common_info(struct pevent *pevent,
* All events should have the same common elements.
* Pick any event to find where the type is.
*/
- if (!pevent->events)
- die("no event_list!");
+ if (!pevent->events) {
+ do_warning("no event_list!");
+ return -1;
+ }
event = pevent->events[0];
field = pevent_find_common_field(event, type);
@@ -3084,7 +3207,8 @@ eval_num_arg(void *data, int size, struct event_format *event, struct print_arg
if (!arg->field.field) {
arg->field.field = pevent_find_any_field(event, arg->field.name);
if (!arg->field.field)
- die("field %s not found", arg->field.name);
+ goto out_warning_field;
+
}
/* must be a number */
val = pevent_read_number(pevent, data + arg->field.field->offset,
@@ -3145,8 +3269,10 @@ eval_num_arg(void *data, int size, struct event_format *event, struct print_arg
if (!larg->field.field) {
larg->field.field =
pevent_find_any_field(event, larg->field.name);
- if (!larg->field.field)
- die("field %s not found", larg->field.name);
+ if (!larg->field.field) {
+ arg = larg;
+ goto out_warning_field;
+ }
}
field_size = larg->field.field->elementsize;
offset = larg->field.field->offset +
@@ -3182,7 +3308,7 @@ eval_num_arg(void *data, int size, struct event_format *event, struct print_arg
val = left != right;
break;
default:
- die("unknown op '%s'", arg->op.op);
+ goto out_warning_op;
}
break;
case '~':
@@ -3212,7 +3338,7 @@ eval_num_arg(void *data, int size, struct event_format *event, struct print_arg
val = left <= right;
break;
default:
- die("unknown op '%s'", arg->op.op);
+ goto out_warning_op;
}
break;
case '>':
@@ -3227,12 +3353,13 @@ eval_num_arg(void *data, int size, struct event_format *event, struct print_arg
val = left >= right;
break;
default:
- die("unknown op '%s'", arg->op.op);
+ goto out_warning_op;
}
break;
case '=':
if (arg->op.op[1] != '=')
- die("unknown op '%s'", arg->op.op);
+ goto out_warning_op;
+
val = left == right;
break;
case '-':
@@ -3248,13 +3375,21 @@ eval_num_arg(void *data, int size, struct event_format *event, struct print_arg
val = left * right;
break;
default:
- die("unknown op '%s'", arg->op.op);
+ goto out_warning_op;
}
break;
default: /* not sure what to do here */
return 0;
}
return val;
+
+out_warning_op:
+ do_warning("%s: unknown op '%s'", __func__, arg->op.op);
+ return 0;
+
+out_warning_field:
+ do_warning("%s: field %s not found", __func__, arg->field.name);
+ return 0;
}
struct flag {
@@ -3331,8 +3466,10 @@ static void print_str_arg(struct trace_seq *s, void *data, int size,
field = arg->field.field;
if (!field) {
field = pevent_find_any_field(event, arg->field.name);
- if (!field)
- die("field %s not found", arg->field.name);
+ if (!field) {
+ str = arg->field.name;
+ goto out_warning_field;
+ }
arg->field.field = field;
}
/* Zero sized fields, mean the rest of the data */
@@ -3349,7 +3486,11 @@ static void print_str_arg(struct trace_seq *s, void *data, int size,
trace_seq_printf(s, "%lx", addr);
break;
}
- str = malloc_or_die(len + 1);
+ str = malloc(len + 1);
+ if (!str) {
+ do_warning("%s: not enough memory!", __func__);
+ return;
+ }
memcpy(str, data + field->offset, len);
str[len] = 0;
print_str_to_seq(s, format, len_arg, str);
@@ -3389,7 +3530,7 @@ static void print_str_arg(struct trace_seq *s, void *data, int size,
str = arg->hex.field->field.name;
field = pevent_find_any_field(event, str);
if (!field)
- die("field %s not found", str);
+ goto out_warning_field;
arg->hex.field->field.field = field;
}
hex = data + field->offset;
@@ -3441,6 +3582,11 @@ static void print_str_arg(struct trace_seq *s, void *data, int size,
/* well... */
break;
}
+
+ return;
+
+out_warning_field:
+ do_warning("%s: field %s not found", __func__, str);
}
static unsigned long long
@@ -3467,7 +3613,11 @@ process_defined_func(struct trace_seq *s, void *data, int size,
farg = arg->func.args;
param = func_handle->params;
- args = malloc_or_die(sizeof(*args) * func_handle->nr_args);
+ ret = ULLONG_MAX;
+ args = malloc(sizeof(*args) * func_handle->nr_args);
+ if (!args)
+ goto out;
+
for (i = 0; i < func_handle->nr_args; i++) {
switch (param->type) {
case PEVENT_FUNC_ARG_INT:
@@ -3479,13 +3629,19 @@ process_defined_func(struct trace_seq *s, void *data, int size,
trace_seq_init(&str);
print_str_arg(&str, data, size, event, "%s", -1, farg);
trace_seq_terminate(&str);
- string = malloc_or_die(sizeof(*string));
+ string = malloc(sizeof(*string));
+ if (!string) {
+ do_warning("%s(%d): malloc str", __func__, __LINE__);
+ goto out_free;
+ }
string->next = strings;
string->str = strdup(str.buffer);
- if (!string->str)
- die("malloc str");
-
- args[i] = (unsigned long long)string->str;
+ if (!string->str) {
+ free(string);
+ do_warning("%s(%d): malloc str", __func__, __LINE__);
+ goto out_free;
+ }
+ args[i] = (uintptr_t)string->str;
strings = string;
trace_seq_destroy(&str);
break;
@@ -3494,14 +3650,15 @@ process_defined_func(struct trace_seq *s, void *data, int size,
* Something went totally wrong, this is not
* an input error, something in this code broke.
*/
- die("Unexpected end of arguments\n");
- break;
+ do_warning("Unexpected end of arguments\n");
+ goto out_free;
}
farg = farg->next;
param = param->next;
}
ret = (*func_handle->func)(s, args);
+out_free:
free(args);
while (strings) {
string = strings;
@@ -3515,6 +3672,18 @@ process_defined_func(struct trace_seq *s, void *data, int size,
return ret;
}
+static void free_args(struct print_arg *args)
+{
+ struct print_arg *next;
+
+ while (args) {
+ next = args->next;
+
+ free_arg(args);
+ args = next;
+ }
+}
+
static struct print_arg *make_bprint_args(char *fmt, void *data, int size, struct event_format *event)
{
struct pevent *pevent = event->pevent;
@@ -3530,11 +3699,15 @@ static struct print_arg *make_bprint_args(char *fmt, void *data, int size, struc
if (!field) {
field = pevent_find_field(event, "buf");
- if (!field)
- die("can't find buffer field for binary printk");
+ if (!field) {
+ do_warning("can't find buffer field for binary printk");
+ return NULL;
+ }
ip_field = pevent_find_field(event, "ip");
- if (!ip_field)
- die("can't find ip field for binary printk");
+ if (!ip_field) {
+ do_warning("can't find ip field for binary printk");
+ return NULL;
+ }
pevent->bprint_buf_field = field;
pevent->bprint_ip_field = ip_field;
}
@@ -3545,13 +3718,18 @@ static struct print_arg *make_bprint_args(char *fmt, void *data, int size, struc
* The first arg is the IP pointer.
*/
args = alloc_arg();
+ if (!args) {
+ do_warning("%s(%d): not enough memory!", __func__, __LINE__);
+ return NULL;
+ }
arg = args;
arg->next = NULL;
next = &arg->next;
arg->type = PRINT_ATOM;
- arg->atom.atom = malloc_or_die(32);
- sprintf(arg->atom.atom, "%lld", ip);
+
+ if (asprintf(&arg->atom.atom, "%lld", ip) < 0)
+ goto out_free;
/* skip the first "%pf : " */
for (ptr = fmt + 6, bptr = data + field->offset;
@@ -3606,10 +3784,17 @@ static struct print_arg *make_bprint_args(char *fmt, void *data, int size, struc
val = pevent_read_number(pevent, bptr, vsize);
bptr += vsize;
arg = alloc_arg();
+ if (!arg) {
+ do_warning("%s(%d): not enough memory!",
+ __func__, __LINE__);
+ goto out_free;
+ }
arg->next = NULL;
arg->type = PRINT_ATOM;
- arg->atom.atom = malloc_or_die(32);
- sprintf(arg->atom.atom, "%lld", val);
+ if (asprintf(&arg->atom.atom, "%lld", val) < 0) {
+ free(arg);
+ goto out_free;
+ }
*next = arg;
next = &arg->next;
/*
@@ -3622,11 +3807,16 @@ static struct print_arg *make_bprint_args(char *fmt, void *data, int size, struc
break;
case 's':
arg = alloc_arg();
+ if (!arg) {
+ do_warning("%s(%d): not enough memory!",
+ __func__, __LINE__);
+ goto out_free;
+ }
arg->next = NULL;
arg->type = PRINT_BSTRING;
arg->string.string = strdup(bptr);
if (!arg->string.string)
- break;
+ goto out_free;
bptr += strlen(bptr) + 1;
*next = arg;
next = &arg->next;
@@ -3637,22 +3827,15 @@ static struct print_arg *make_bprint_args(char *fmt, void *data, int size, struc
}
return args;
-}
-static void free_args(struct print_arg *args)
-{
- struct print_arg *next;
-
- while (args) {
- next = args->next;
-
- free_arg(args);
- args = next;
- }
+out_free:
+ free_args(args);
+ return NULL;
}
static char *
-get_bprint_format(void *data, int size __unused, struct event_format *event)
+get_bprint_format(void *data, int size __maybe_unused,
+ struct event_format *event)
{
struct pevent *pevent = event->pevent;
unsigned long long addr;
@@ -3665,8 +3848,10 @@ get_bprint_format(void *data, int size __unused, struct event_format *event)
if (!field) {
field = pevent_find_field(event, "fmt");
- if (!field)
- die("can't find format field for binary printk");
+ if (!field) {
+ do_warning("can't find format field for binary printk");
+ return NULL;
+ }
pevent->bprint_fmt_field = field;
}
@@ -3674,9 +3859,8 @@ get_bprint_format(void *data, int size __unused, struct event_format *event)
printk = find_printk(pevent, addr);
if (!printk) {
- format = malloc_or_die(45);
- sprintf(format, "%%pf : (NO FORMAT FOUND at %llx)\n",
- addr);
+ if (asprintf(&format, "%%pf : (NO FORMAT FOUND at %llx)\n", addr) < 0)
+ return NULL;
return format;
}
@@ -3684,8 +3868,8 @@ get_bprint_format(void *data, int size __unused, struct event_format *event)
/* Remove any quotes. */
if (*p == '"')
p++;
- format = malloc_or_die(strlen(p) + 10);
- sprintf(format, "%s : %s", "%pf", p);
+ if (asprintf(&format, "%s : %s", "%pf", p) < 0)
+ return NULL;
/* remove ending quotes and new line since we will add one too */
p = format + strlen(format) - 1;
if (*p == '"')
@@ -3720,8 +3904,11 @@ static void print_mac_arg(struct trace_seq *s, int mac, void *data, int size,
if (!arg->field.field) {
arg->field.field =
pevent_find_any_field(event, arg->field.name);
- if (!arg->field.field)
- die("field %s not found", arg->field.name);
+ if (!arg->field.field) {
+ do_warning("%s: field %s not found",
+ __func__, arg->field.name);
+ return;
+ }
}
if (arg->field.field->size != 6) {
trace_seq_printf(s, "INVALIDMAC");
@@ -3888,8 +4075,11 @@ static void pretty_print(struct trace_seq *s, void *data, int size, struct event
goto cont_process;
case '*':
/* The argument is the length. */
- if (!arg)
- die("no argument match");
+ if (!arg) {
+ do_warning("no argument match");
+ event->flags |= EVENT_FL_FAILED;
+ goto out_failed;
+ }
len_arg = eval_num_arg(data, size, event, arg);
len_as_arg = 1;
arg = arg->next;
@@ -3922,15 +4112,21 @@ static void pretty_print(struct trace_seq *s, void *data, int size, struct event
case 'x':
case 'X':
case 'u':
- if (!arg)
- die("no argument match");
+ if (!arg) {
+ do_warning("no argument match");
+ event->flags |= EVENT_FL_FAILED;
+ goto out_failed;
+ }
len = ((unsigned long)ptr + 1) -
(unsigned long)saveptr;
/* should never happen */
- if (len > 31)
- die("bad format!");
+ if (len > 31) {
+ do_warning("bad format!");
+ event->flags |= EVENT_FL_FAILED;
+ len = 31;
+ }
memcpy(format, saveptr, len);
format[len] = 0;
@@ -3994,19 +4190,26 @@ static void pretty_print(struct trace_seq *s, void *data, int size, struct event
trace_seq_printf(s, format, (long long)val);
break;
default:
- die("bad count (%d)", ls);
+ do_warning("bad count (%d)", ls);
+ event->flags |= EVENT_FL_FAILED;
}
break;
case 's':
- if (!arg)
- die("no matching argument");
+ if (!arg) {
+ do_warning("no matching argument");
+ event->flags |= EVENT_FL_FAILED;
+ goto out_failed;
+ }
len = ((unsigned long)ptr + 1) -
(unsigned long)saveptr;
/* should never happen */
- if (len > 31)
- die("bad format!");
+ if (len > 31) {
+ do_warning("bad format!");
+ event->flags |= EVENT_FL_FAILED;
+ len = 31;
+ }
memcpy(format, saveptr, len);
format[len] = 0;
@@ -4024,6 +4227,11 @@ static void pretty_print(struct trace_seq *s, void *data, int size, struct event
trace_seq_putc(s, *ptr);
}
+ if (event->flags & EVENT_FL_FAILED) {
+out_failed:
+ trace_seq_printf(s, "[FAILED TO PARSE]");
+ }
+
if (args) {
free_args(args);
free(bprint_fmt);
@@ -4356,7 +4564,10 @@ get_event_fields(const char *type, const char *name,
struct format_field *field;
int i = 0;
- fields = malloc_or_die(sizeof(*fields) * (count + 1));
+ fields = malloc(sizeof(*fields) * (count + 1));
+ if (!fields)
+ return NULL;
+
for (field = list; field; field = field->next) {
fields[i++] = field;
if (i == count + 1) {
@@ -4672,8 +4883,7 @@ static int find_event_handle(struct pevent *pevent, struct event_format *event)
}
/**
- * pevent_parse_event - parse the event format
- * @pevent: the handle to the pevent
+ * __pevent_parse_format - parse the event format
* @buf: the buffer storing the event format string
* @size: the size of @buf
* @sys: the system the event belongs to
@@ -4685,28 +4895,27 @@ static int find_event_handle(struct pevent *pevent, struct event_format *event)
*
* /sys/kernel/debug/tracing/events/.../.../format
*/
-int pevent_parse_event(struct pevent *pevent,
- const char *buf, unsigned long size,
- const char *sys)
+enum pevent_errno __pevent_parse_format(struct event_format **eventp,
+ struct pevent *pevent, const char *buf,
+ unsigned long size, const char *sys)
{
struct event_format *event;
int ret;
init_input_buf(buf, size);
- event = alloc_event();
+ *eventp = event = alloc_event();
if (!event)
- return -ENOMEM;
+ return PEVENT_ERRNO__MEM_ALLOC_FAILED;
event->name = event_read_name();
if (!event->name) {
/* Bad event? */
- free(event);
- return -1;
+ ret = PEVENT_ERRNO__MEM_ALLOC_FAILED;
+ goto event_alloc_failed;
}
if (strcmp(sys, "ftrace") == 0) {
-
event->flags |= EVENT_FL_ISFTRACE;
if (strcmp(event->name, "bprint") == 0)
@@ -4714,74 +4923,189 @@ int pevent_parse_event(struct pevent *pevent,
}
event->id = event_read_id();
- if (event->id < 0)
- die("failed to read event id");
+ if (event->id < 0) {
+ ret = PEVENT_ERRNO__READ_ID_FAILED;
+ /*
+ * This isn't actually an allocation error,
+ * but as the ID is critical, just bail out.
+ */
+ goto event_alloc_failed;
+ }
event->system = strdup(sys);
- if (!event->system)
- die("failed to allocate system");
-
- /* Add pevent to event so that it can be referenced */
- event->pevent = pevent;
+ if (!event->system) {
+ ret = PEVENT_ERRNO__MEM_ALLOC_FAILED;
+ goto event_alloc_failed;
+ }
ret = event_read_format(event);
if (ret < 0) {
- do_warning("failed to read event format for %s", event->name);
- goto event_failed;
+ ret = PEVENT_ERRNO__READ_FORMAT_FAILED;
+ goto event_parse_failed;
}
/*
* If the event has an override, don't print warnings if the event
* print format fails to parse.
*/
- if (find_event_handle(pevent, event))
+ if (pevent && find_event_handle(pevent, event))
show_warning = 0;
ret = event_read_print(event);
- if (ret < 0) {
- do_warning("failed to read event print fmt for %s",
- event->name);
- show_warning = 1;
- goto event_failed;
- }
show_warning = 1;
- add_event(pevent, event);
+ if (ret < 0) {
+ ret = PEVENT_ERRNO__READ_PRINT_FAILED;
+ goto event_parse_failed;
+ }
if (!ret && (event->flags & EVENT_FL_ISFTRACE)) {
struct format_field *field;
struct print_arg *arg, **list;
/* old ftrace had no args */
-
list = &event->print_fmt.args;
for (field = event->format.fields; field; field = field->next) {
arg = alloc_arg();
- *list = arg;
- list = &arg->next;
+ if (!arg) {
+ event->flags |= EVENT_FL_FAILED;
+ return PEVENT_ERRNO__OLD_FTRACE_ARG_FAILED;
+ }
arg->type = PRINT_FIELD;
arg->field.name = strdup(field->name);
if (!arg->field.name) {
- do_warning("failed to allocate field name");
event->flags |= EVENT_FL_FAILED;
- return -1;
+ free_arg(arg);
+ return PEVENT_ERRNO__OLD_FTRACE_ARG_FAILED;
}
arg->field.field = field;
+ *list = arg;
+ list = &arg->next;
}
return 0;
}
+ return 0;
+
+ event_parse_failed:
+ event->flags |= EVENT_FL_FAILED;
+ return ret;
+
+ event_alloc_failed:
+ free(event->system);
+ free(event->name);
+ free(event);
+ *eventp = NULL;
+ return ret;
+}
+
+/**
+ * pevent_parse_format - parse the event format
+ * @eventp: returns the parsed event, if parsing succeeded
+ * @buf: the buffer storing the event format string
+ * @size: the size of @buf
+ * @sys: the system the event belongs to
+ *
+ * This parses the event format and creates an event structure
+ * to quickly parse raw data for a given event.
+ *
+ * These files currently come from:
+ *
+ * /sys/kernel/debug/tracing/events/.../.../format
+ */
+enum pevent_errno pevent_parse_format(struct event_format **eventp, const char *buf,
+ unsigned long size, const char *sys)
+{
+ return __pevent_parse_format(eventp, NULL, buf, size, sys);
+}
+
+/**
+ * pevent_parse_event - parse the event format
+ * @pevent: the handle to the pevent
+ * @buf: the buffer storing the event format string
+ * @size: the size of @buf
+ * @sys: the system the event belongs to
+ *
+ * This parses the event format and creates an event structure
+ * to quickly parse raw data for a given event.
+ *
+ * These files currently come from:
+ *
+ * /sys/kernel/debug/tracing/events/.../.../format
+ */
+enum pevent_errno pevent_parse_event(struct pevent *pevent, const char *buf,
+ unsigned long size, const char *sys)
+{
+ struct event_format *event = NULL;
+ int ret = __pevent_parse_format(&event, pevent, buf, size, sys);
+
+ if (event == NULL)
+ return ret;
+
+ /* Add pevent to event so that it can be referenced */
+ event->pevent = pevent;
+
+ if (add_event(pevent, event)) {
+ ret = PEVENT_ERRNO__MEM_ALLOC_FAILED;
+ goto event_add_failed;
+ }
+
#define PRINT_ARGS 0
if (PRINT_ARGS && event->print_fmt.args)
print_args(event->print_fmt.args);
return 0;
- event_failed:
- event->flags |= EVENT_FL_FAILED;
- /* still add it even if it failed */
- add_event(pevent, event);
- return -1;
+event_add_failed:
+ pevent_free_format(event);
+ return ret;
+}
+
+#undef _PE
+#define _PE(code, str) str
+static const char * const pevent_error_str[] = {
+ PEVENT_ERRORS
+};
+#undef _PE
+
+int pevent_strerror(struct pevent *pevent, enum pevent_errno errnum,
+ char *buf, size_t buflen)
+{
+ int idx;
+ const char *msg;
+
+ if (errnum >= 0) {
+ msg = strerror_r(errnum, buf, buflen);
+ if (msg != buf) {
+ size_t len = strlen(msg);
+ memcpy(buf, msg, min(buflen - 1, len));
+ *(buf + min(buflen - 1, len)) = '\0';
+ }
+ return 0;
+ }
+
+ if (errnum <= __PEVENT_ERRNO__START ||
+ errnum >= __PEVENT_ERRNO__END)
+ return -1;
+
+ idx = errnum - __PEVENT_ERRNO__START - 1;
+ msg = pevent_error_str[idx];
+
+ switch (errnum) {
+ case PEVENT_ERRNO__MEM_ALLOC_FAILED:
+ case PEVENT_ERRNO__PARSE_EVENT_FAILED:
+ case PEVENT_ERRNO__READ_ID_FAILED:
+ case PEVENT_ERRNO__READ_FORMAT_FAILED:
+ case PEVENT_ERRNO__READ_PRINT_FAILED:
+ case PEVENT_ERRNO__OLD_FTRACE_ARG_FAILED:
+ snprintf(buf, buflen, "%s", msg);
+ break;
+
+ default:
+ /* cannot reach here */
+ break;
+ }
+
+ return 0;
}
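+
+/*
+ * Caller-side sketch of the new error handling (illustration only;
+ * "sched" and the buffer variables here are made up):
+ *
+ *	enum pevent_errno err;
+ *	char errbuf[128];
+ *
+ *	err = pevent_parse_event(pevent, buf, size, "sched");
+ *	if (err != PEVENT_ERRNO__SUCCESS) {
+ *		pevent_strerror(pevent, err, errbuf, sizeof(errbuf));
+ *		fprintf(stderr, "parse failed: %s\n", errbuf);
+ *	}
+ */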
int get_field_val(struct trace_seq *s, struct format_field *field,
@@ -5000,6 +5324,7 @@ int pevent_register_print_function(struct pevent *pevent,
struct pevent_func_params *param;
enum pevent_func_arg_type type;
va_list ap;
+ int ret;
func_handle = find_func_handler(pevent, name);
if (func_handle) {
@@ -5012,14 +5337,20 @@ int pevent_register_print_function(struct pevent *pevent,
remove_func_handler(pevent, name);
}
- func_handle = malloc_or_die(sizeof(*func_handle));
- memset(func_handle, 0, sizeof(*func_handle));
+ func_handle = calloc(1, sizeof(*func_handle));
+ if (!func_handle) {
+ do_warning("Failed to allocate function handler");
+ return PEVENT_ERRNO__MEM_ALLOC_FAILED;
+ }
func_handle->ret_type = ret_type;
func_handle->name = strdup(name);
func_handle->func = func;
- if (!func_handle->name)
- die("Failed to allocate function name");
+ if (!func_handle->name) {
+ do_warning("Failed to allocate function name");
+ free(func_handle);
+ return PEVENT_ERRNO__MEM_ALLOC_FAILED;
+ }
next_param = &(func_handle->params);
va_start(ap, name);
@@ -5029,11 +5360,17 @@ int pevent_register_print_function(struct pevent *pevent,
break;
if (type < 0 || type >= PEVENT_FUNC_ARG_MAX_TYPES) {
- warning("Invalid argument type %d", type);
+ do_warning("Invalid argument type %d", type);
+ ret = PEVENT_ERRNO__INVALID_ARG_TYPE;
goto out_free;
}
- param = malloc_or_die(sizeof(*param));
+ param = malloc(sizeof(*param));
+ if (!param) {
+ do_warning("Failed to allocate function param");
+ ret = PEVENT_ERRNO__MEM_ALLOC_FAILED;
+ goto out_free;
+ }
param->type = type;
param->next = NULL;
@@ -5051,7 +5388,7 @@ int pevent_register_print_function(struct pevent *pevent,
out_free:
va_end(ap);
free_func_handle(func_handle);
- return -1;
+ return ret;
}
/**
@@ -5103,8 +5440,12 @@ int pevent_register_event_handler(struct pevent *pevent,
not_found:
/* Save for later use. */
- handle = malloc_or_die(sizeof(*handle));
- memset(handle, 0, sizeof(*handle));
+ handle = calloc(1, sizeof(*handle));
+ if (!handle) {
+ do_warning("Failed to allocate event handler");
+ return PEVENT_ERRNO__MEM_ALLOC_FAILED;
+ }
+
handle->id = id;
if (event_name)
handle->event_name = strdup(event_name);
@@ -5113,7 +5454,11 @@ int pevent_register_event_handler(struct pevent *pevent,
if ((event_name && !handle->event_name) ||
(sys_name && !handle->sys_name)) {
- die("Failed to allocate event/sys name");
+ do_warning("Failed to allocate event/sys name");
+ free((void *)handle->event_name);
+ free((void *)handle->sys_name);
+ free(handle);
+ return PEVENT_ERRNO__MEM_ALLOC_FAILED;
}
handle->func = func;
@@ -5129,13 +5474,10 @@ int pevent_register_event_handler(struct pevent *pevent,
*/
struct pevent *pevent_alloc(void)
{
- struct pevent *pevent;
+ struct pevent *pevent = calloc(1, sizeof(*pevent));
- pevent = malloc(sizeof(*pevent));
- if (!pevent)
- return NULL;
- memset(pevent, 0, sizeof(*pevent));
- pevent->ref_count = 1;
+ if (pevent)
+ pevent->ref_count = 1;
return pevent;
}
@@ -5164,7 +5506,7 @@ static void free_formats(struct format *format)
free_format_fields(format->fields);
}
-static void free_event(struct event_format *event)
+void pevent_free_format(struct event_format *event)
{
free(event->name);
free(event->system);
@@ -5250,7 +5592,7 @@ void pevent_free(struct pevent *pevent)
}
for (i = 0; i < pevent->nr_events; i++)
- free_event(pevent->events[i]);
+ pevent_free_format(pevent->events[i]);
while (pevent->handlers) {
handle = pevent->handlers;
diff --git a/tools/lib/traceevent/event-parse.h b/tools/lib/traceevent/event-parse.h
index 5772ad8cb38..24a4bbabc5d 100644
--- a/tools/lib/traceevent/event-parse.h
+++ b/tools/lib/traceevent/event-parse.h
@@ -24,8 +24,8 @@
#include <stdarg.h>
#include <regex.h>
-#ifndef __unused
-#define __unused __attribute__ ((unused))
+#ifndef __maybe_unused
+#define __maybe_unused __attribute__((unused))
#endif
/* ----------------------- trace_seq ----------------------- */
@@ -49,7 +49,7 @@ struct pevent_record {
int cpu;
int ref_count;
int locked; /* Do not free, even if ref_count is zero */
- void *private;
+ void *priv;
#if DEBUG_RECORD
struct pevent_record *prev;
struct pevent_record *next;
@@ -106,7 +106,7 @@ struct plugin_option {
char *plugin_alias;
char *description;
char *value;
- void *private;
+ void *priv;
int set;
};
@@ -345,6 +345,35 @@ enum pevent_flag {
PEVENT_NSEC_OUTPUT = 1, /* output in NSECS */
};
+#define PEVENT_ERRORS \
+ _PE(MEM_ALLOC_FAILED, "failed to allocate memory"), \
+ _PE(PARSE_EVENT_FAILED, "failed to parse event"), \
+ _PE(READ_ID_FAILED, "failed to read event id"), \
+ _PE(READ_FORMAT_FAILED, "failed to read event format"), \
+ _PE(READ_PRINT_FAILED, "failed to read event print fmt"), \
+ _PE(OLD_FTRACE_ARG_FAILED, "failed to allocate field name for ftrace"), \
+ _PE(INVALID_ARG_TYPE, "invalid argument type")
+
+#undef _PE
+#define _PE(__code, __str) PEVENT_ERRNO__ ## __code
+enum pevent_errno {
+ PEVENT_ERRNO__SUCCESS = 0,
+
+ /*
+ * Choose an arbitrarily large negative number so it does not clash with
+ * standard errno values, since SUS requires errno to have distinct
+ * positive values.
+ * See 'Issue 6' in the link below.
+ *
+ * http://pubs.opengroup.org/onlinepubs/9699919799/basedefs/errno.h.html
+ */
+ __PEVENT_ERRNO__START = -100000,
+
+ PEVENT_ERRORS,
+
+ __PEVENT_ERRNO__END,
+};
+#undef _PE
+
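+/*
+ * PEVENT_ERRORS is an X-macro list: it is expanded once here to generate
+ * the enum constants and once in event-parse.c to generate the matching
+ * string table used by pevent_strerror().  Roughly (illustration only),
+ * the enum body becomes:
+ *
+ *	PEVENT_ERRNO__MEM_ALLOC_FAILED,		// -99999
+ *	PEVENT_ERRNO__PARSE_EVENT_FAILED,	// -99998
+ *	...
+ *
+ * and the string table becomes:
+ *
+ *	"failed to allocate memory", "failed to parse event", ...
+ */
+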
struct cmdline;
struct cmdline_list;
struct func_map;
@@ -509,8 +538,11 @@ void pevent_print_event(struct pevent *pevent, struct trace_seq *s,
int pevent_parse_header_page(struct pevent *pevent, char *buf, unsigned long size,
int long_size);
-int pevent_parse_event(struct pevent *pevent, const char *buf,
- unsigned long size, const char *sys);
+enum pevent_errno pevent_parse_event(struct pevent *pevent, const char *buf,
+ unsigned long size, const char *sys);
+enum pevent_errno pevent_parse_format(struct event_format **eventp, const char *buf,
+ unsigned long size, const char *sys);
+void pevent_free_format(struct event_format *event);
void *pevent_get_field_raw(struct trace_seq *s, struct event_format *event,
const char *name, struct pevent_record *record,
@@ -561,6 +593,8 @@ int pevent_data_pid(struct pevent *pevent, struct pevent_record *rec);
const char *pevent_data_comm_from_pid(struct pevent *pevent, int pid);
void pevent_event_info(struct trace_seq *s, struct event_format *event,
struct pevent_record *record);
+int pevent_strerror(struct pevent *pevent, enum pevent_errno errnum,
+ char *buf, size_t buflen);
struct event_format **pevent_list_events(struct pevent *pevent, enum event_sort_type);
struct format_field **pevent_event_common_fields(struct event_format *event);
diff --git a/tools/lib/traceevent/event-utils.h b/tools/lib/traceevent/event-utils.h
index 08296383d1e..bc075006966 100644
--- a/tools/lib/traceevent/event-utils.h
+++ b/tools/lib/traceevent/event-utils.h
@@ -39,6 +39,12 @@ void __vdie(const char *fmt, ...);
void __vwarning(const char *fmt, ...);
void __vpr_stat(const char *fmt, ...);
+#define min(x, y) ({ \
+ typeof(x) _min1 = (x); \
+ typeof(y) _min2 = (y); \
+ (void) (&_min1 == &_min2); \
+ _min1 < _min2 ? _min1 : _min2; })
+
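+/*
+ * The (void) (&_min1 == &_min2) line is the usual type-check trick:
+ * comparing pointers to the two temporaries makes the compiler warn
+ * when x and y have incompatible types.  Illustration only:
+ *
+ *	int a = 3;
+ *	long b = 5;
+ *
+ *	min(a, a);	// fine: both operands are int
+ *	min(a, b);	// warns: comparison of distinct pointer types
+ */
+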
static inline char *strim(char *string)
{
char *ret;
diff --git a/tools/perf/.gitignore b/tools/perf/.gitignore
index 26b823b61aa..8f8fbc227a4 100644
--- a/tools/perf/.gitignore
+++ b/tools/perf/.gitignore
@@ -21,3 +21,5 @@ config.mak
config.mak.autogen
*-bison.*
*-flex.*
+*.pyc
+*.pyo
diff --git a/tools/perf/Documentation/Makefile b/tools/perf/Documentation/Makefile
index ca600e09c8d..9f2e44f2b17 100644
--- a/tools/perf/Documentation/Makefile
+++ b/tools/perf/Documentation/Makefile
@@ -195,10 +195,10 @@ install-pdf: pdf
#install-html: html
# '$(SHELL_PATH_SQ)' ./install-webdoc.sh $(DESTDIR)$(htmldir)
-../PERF-VERSION-FILE: .FORCE-PERF-VERSION-FILE
- $(QUIET_SUBDIR0)../ $(QUIET_SUBDIR1) PERF-VERSION-FILE
+$(OUTPUT)PERF-VERSION-FILE: .FORCE-PERF-VERSION-FILE
+ $(QUIET_SUBDIR0)../ $(QUIET_SUBDIR1) $(OUTPUT)PERF-VERSION-FILE
--include ../PERF-VERSION-FILE
+-include $(OUTPUT)PERF-VERSION-FILE
#
# Determine "include::" file references in asciidoc files.
diff --git a/tools/perf/Documentation/jit-interface.txt b/tools/perf/Documentation/jit-interface.txt
new file mode 100644
index 00000000000..a8656f56491
--- /dev/null
+++ b/tools/perf/Documentation/jit-interface.txt
@@ -0,0 +1,15 @@
+perf supports a simple JIT interface for resolving symbols in dynamically
+generated code.
+
+The JIT has to write a /tmp/perf-%d.map file (%d = pid of the process).
+
+This is a text file.
+
+Each line has the following format, fields separated with spaces:
+
+START SIZE symbolname
+
+START and SIZE are hex numbers without 0x.
+symbolname is the rest of the line, so it could contain special characters.
+
+The ownership of the file has to match the process.
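+
+For illustration only (the addresses, sizes and names below are made up),
+the file could contain:
+
+  40b2e0 1e8 JittedMethod::run
+  40b4d0 64 trampoline_17
+
+A JIT could emit such lines with a plain fprintf(3), e.g.:
+
+  fprintf(f, "%lx %x %s\n", (unsigned long)start, size, name);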
diff --git a/tools/perf/Documentation/perf-annotate.txt b/tools/perf/Documentation/perf-annotate.txt
index c89f9e1453f..c8ffd9fd5c6 100644
--- a/tools/perf/Documentation/perf-annotate.txt
+++ b/tools/perf/Documentation/perf-annotate.txt
@@ -85,6 +85,9 @@ OPTIONS
-M::
--disassembler-style=:: Set disassembler style for objdump.
+--objdump=<path>::
+ Path to objdump binary.
+
SEE ALSO
--------
linkperf:perf-record[1], linkperf:perf-report[1]
diff --git a/tools/perf/Documentation/perf-diff.txt b/tools/perf/Documentation/perf-diff.txt
index 74d7481ed7a..ab7f667de1b 100644
--- a/tools/perf/Documentation/perf-diff.txt
+++ b/tools/perf/Documentation/perf-diff.txt
@@ -17,6 +17,9 @@ captured via perf record.
If no parameters are passed it will assume perf.data.old and perf.data.
+The differential profile is displayed only for events matching both
+specified perf.data files.
+
OPTIONS
-------
-M::
diff --git a/tools/perf/Documentation/perf-kvm.txt b/tools/perf/Documentation/perf-kvm.txt
index dd84cb2f0a8..326f2cb333c 100644
--- a/tools/perf/Documentation/perf-kvm.txt
+++ b/tools/perf/Documentation/perf-kvm.txt
@@ -12,7 +12,7 @@ SYNOPSIS
[--guestkallsyms=<path> --guestmodules=<path> | --guestvmlinux=<path>]]
{top|record|report|diff|buildid-list}
'perf kvm' [--host] [--guest] [--guestkallsyms=<path> --guestmodules=<path>
- | --guestvmlinux=<path>] {top|record|report|diff|buildid-list}
+ | --guestvmlinux=<path>] {top|record|report|diff|buildid-list|stat}
DESCRIPTION
-----------
@@ -38,6 +38,18 @@ There are a couple of variants of perf kvm:
so that other tools can be used to fetch packages with matching symbol tables
for use by perf report.
+ 'perf kvm stat <command>' to run a command and gather performance counter
+ statistics.
+ In particular, 'perf kvm stat record/report' generates a statistical analysis
+ of KVM events. Currently, vmexit, mmio and ioport events are supported.
+ 'perf kvm stat record <command>' records kvm events while <command> runs,
+ and produces a file containing the tracing results of those events.
+
+ 'perf kvm stat report' reports statistics on the recorded events, such as
+ event handling time, number of samples, and so on.
+
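+ As an illustration (the pid below is made up), a session could look like:
+
+   perf kvm stat record -p 4988
+   perf kvm stat report --events=vmexit --key=time
+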
OPTIONS
-------
-i::
@@ -68,7 +80,21 @@ OPTIONS
--guestvmlinux=<path>::
Guest os kernel vmlinux.
+STAT REPORT OPTIONS
+-------------------
+--vcpu=<value>::
+ Analyze events which occur on this vcpu. (default: all vcpus)
+
+--events=<value>::
+ Events to be analyzed. Possible values: vmexit, mmio, ioport.
+ (default: vmexit)
+
+-k::
+--key=<value>::
+ Sorting key. Possible values: sample (default, sort by samples
+ number), time (sort by average time).
+
SEE ALSO
--------
linkperf:perf-top[1], linkperf:perf-record[1], linkperf:perf-report[1],
-linkperf:perf-diff[1], linkperf:perf-buildid-list[1]
+linkperf:perf-diff[1], linkperf:perf-buildid-list[1],
+linkperf:perf-stat[1]
diff --git a/tools/perf/Documentation/perf-list.txt b/tools/perf/Documentation/perf-list.txt
index ddc22525228..d1e39dc8c81 100644
--- a/tools/perf/Documentation/perf-list.txt
+++ b/tools/perf/Documentation/perf-list.txt
@@ -15,24 +15,43 @@ DESCRIPTION
This command displays the symbolic event types which can be selected in the
various perf commands with the -e option.
+[[EVENT_MODIFIERS]]
EVENT MODIFIERS
---------------
Events can optionally have a modifier by appending a colon and one or
-more modifiers. Modifiers allow the user to restrict when events are
-counted with 'u' for user-space, 'k' for kernel, 'h' for hypervisor.
-Additional modifiers are 'G' for guest counting (in KVM guests) and 'H'
-for host counting (not in KVM guests).
+more modifiers. Modifiers allow the user to restrict the events to be
+counted. The following modifiers exist:
+
+ u - user-space counting
+ k - kernel counting
+ h - hypervisor counting
+ G - guest counting (in KVM guests)
+ H - host counting (not in KVM guests)
+ p - precise level
The 'p' modifier can be used for specifying how precise the instruction
-address should be. The 'p' modifier is currently only implemented for
-Intel PEBS and can be specified multiple times:
- 0 - SAMPLE_IP can have arbitrary skid
- 1 - SAMPLE_IP must have constant skid
- 2 - SAMPLE_IP requested to have 0 skid
- 3 - SAMPLE_IP must have 0 skid
+address should be. The 'p' modifier can be specified multiple times:
+
+ 0 - SAMPLE_IP can have arbitrary skid
+ 1 - SAMPLE_IP must have constant skid
+ 2 - SAMPLE_IP requested to have 0 skid
+ 3 - SAMPLE_IP must have 0 skid
+
+For Intel systems precise event sampling is implemented with PEBS
+which supports up to precise-level 2.
-The PEBS implementation now supports up to 2.
+On AMD systems it is implemented using IBS (up to precise-level 2).
+The precise modifier works with event types 0x76 (cpu-cycles, CPU
+clocks not halted) and 0xC1 (micro-ops retired). Both events map to
+IBS execution sampling (IBS op) with the IBS Op Counter Control bit
+(IbsOpCntCtl) set respectively (see AMD64 Architecture Programmer’s
+Manual Volume 2: System Programming, 13.3 Instruction-Based
+Sampling). Examples to use IBS:
+
+ perf record -a -e cpu-cycles:p ... # use ibs op counting cycles
+ perf record -a -e r076:p ... # same as -e cpu-cycles:p
+ perf record -a -e r0C1:p ... # use ibs op counting micro-ops
RAW HARDWARE EVENT DESCRIPTOR
-----------------------------
@@ -44,6 +63,11 @@ layout of IA32_PERFEVTSELx MSRs (see [Intel® 64 and IA-32 Architectures Softwar
of IA32_PERFEVTSELx MSRs) or AMD's PerfEvtSeln (see [AMD64 Architecture Programmer’s Manual Volume 2: System Programming], Page 344,
Figure 13-7 Performance Event-Select Register (PerfEvtSeln)).
+Note: Only the following bit fields can be set in x86 counter
+registers: event, umask, edge, inv, cmask. In particular, guest/host-only and
+OS/user mode flags must be set up using <<EVENT_MODIFIERS, EVENT
+MODIFIERS>>.
+
Example:
If the Intel docs for a QM720 Core i7 describe an event as:
@@ -91,4 +115,4 @@ SEE ALSO
linkperf:perf-stat[1], linkperf:perf-top[1],
linkperf:perf-record[1],
http://www.intel.com/Assets/PDF/manual/253669.pdf[Intel® 64 and IA-32 Architectures Software Developer's Manual Volume 3B: System Programming Guide],
-http://support.amd.com/us/Processor_TechDocs/24593.pdf[AMD64 Architecture Programmer’s Manual Volume 2: System Programming]
+http://support.amd.com/us/Processor_TechDocs/24593_APM_v2.pdf[AMD64 Architecture Programmer’s Manual Volume 2: System Programming]
diff --git a/tools/perf/Documentation/perf-report.txt b/tools/perf/Documentation/perf-report.txt
index 495210a612c..f4d91bebd59 100644
--- a/tools/perf/Documentation/perf-report.txt
+++ b/tools/perf/Documentation/perf-report.txt
@@ -168,6 +168,9 @@ OPTIONS
branch stacks and it will automatically switch to the branch view mode,
unless --no-branch-stack is used.
+--objdump=<path>::
+ Path to objdump binary.
+
SEE ALSO
--------
linkperf:perf-stat[1], linkperf:perf-annotate[1]
diff --git a/tools/perf/Documentation/perf-script-perl.txt b/tools/perf/Documentation/perf-script-perl.txt
index 3152cca1550..d00bef23134 100644
--- a/tools/perf/Documentation/perf-script-perl.txt
+++ b/tools/perf/Documentation/perf-script-perl.txt
@@ -116,8 +116,8 @@ search path and 'use'ing a few support modules (see module
descriptions below):
----
- use lib "$ENV{'PERF_EXEC_PATH'}/scripts/perl/perf-script-Util/lib";
- use lib "./perf-script-Util/lib";
+ use lib "$ENV{'PERF_EXEC_PATH'}/scripts/perl/Perf-Trace-Util/lib";
+ use lib "./Perf-Trace-Util/lib";
use Perf::Trace::Core;
use Perf::Trace::Context;
use Perf::Trace::Util;
diff --git a/tools/perf/Documentation/perf-script-python.txt b/tools/perf/Documentation/perf-script-python.txt
index 47102206911..a4027f221a5 100644
--- a/tools/perf/Documentation/perf-script-python.txt
+++ b/tools/perf/Documentation/perf-script-python.txt
@@ -129,7 +129,7 @@ import os
import sys
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
- '/scripts/python/perf-script-Util/lib/Perf/Trace')
+ '/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from perf_trace_context import *
from Core import *
@@ -216,7 +216,7 @@ import os
import sys
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
- '/scripts/python/perf-script-Util/lib/Perf/Trace')
+ '/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from perf_trace_context import *
from Core import *
@@ -279,7 +279,7 @@ import os
import sys
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
- '/scripts/python/perf-script-Util/lib/Perf/Trace')
+ '/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from perf_trace_context import *
from Core import *
@@ -391,7 +391,7 @@ drwxr-xr-x 4 trz trz 4096 2010-01-26 22:30 .
drwxr-xr-x 4 trz trz 4096 2010-01-26 22:29 ..
drwxr-xr-x 2 trz trz 4096 2010-01-26 22:29 bin
-rw-r--r-- 1 trz trz 2548 2010-01-26 22:29 check-perf-script.py
-drwxr-xr-x 3 trz trz 4096 2010-01-26 22:49 perf-script-Util
+drwxr-xr-x 3 trz trz 4096 2010-01-26 22:49 Perf-Trace-Util
-rw-r--r-- 1 trz trz 1462 2010-01-26 22:30 syscall-counts.py
----
@@ -518,7 +518,7 @@ descriptions below):
import sys
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
- '/scripts/python/perf-script-Util/lib/Perf/Trace')
+ '/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from perf_trace_context import *
from Core import *
diff --git a/tools/perf/Documentation/perf-trace.txt b/tools/perf/Documentation/perf-trace.txt
new file mode 100644
index 00000000000..3a2ae37310a
--- /dev/null
+++ b/tools/perf/Documentation/perf-trace.txt
@@ -0,0 +1,53 @@
+perf-trace(1)
+=============
+
+NAME
+----
+perf-trace - strace inspired tool
+
+SYNOPSIS
+--------
+[verse]
+'perf trace'
+
+DESCRIPTION
+-----------
+This command will show the events associated with the target, initially
+syscalls, but eventually other system events such as page faults, task
+lifetime events, scheduling events, etc.
+
+Initially this is a live-mode-only tool, but eventually it will work with
+perf.data files like the other tools, allowing a 'record' phase detached
+from the analysis phase.
+
+OPTIONS
+-------
+
+--all-cpus::
+ System-wide collection from all CPUs.
+
+-p::
+--pid=::
+ Record events on existing process ID (comma separated list).
+
+--tid=::
+ Record events on existing thread ID (comma separated list).
+
+--uid=::
+ Record events in threads owned by uid. Name or number.
+
+--no-inherit::
+ Child tasks do not inherit counters.
+
+--mmap-pages=::
+ Number of mmap data pages. Must be a power of two.
+
+--cpu::
+	Collect samples only on the list of CPUs provided. Multiple CPUs can be
+	provided as a comma-separated list with no space: 0,1. Ranges of CPUs
+	are specified with -: 0-2. In per-thread mode with inheritance mode on
+	(default), events are captured only when the thread executes on the
+	designated CPUs. Default is to monitor all CPUs.
+
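+EXAMPLES
+--------
+An illustrative invocation (the pid is made up) tracing an existing
+process with a larger ring buffer:
+
+  perf trace --pid 1234 --mmap-pages 512
+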
+SEE ALSO
+--------
+linkperf:perf-record[1], linkperf:perf-script[1]
diff --git a/tools/perf/MANIFEST b/tools/perf/MANIFEST
index b4b572e8c10..80db3f4bcf7 100644
--- a/tools/perf/MANIFEST
+++ b/tools/perf/MANIFEST
@@ -10,8 +10,12 @@ include/linux/stringify.h
lib/rbtree.c
include/linux/swab.h
arch/*/include/asm/unistd*.h
+arch/*/include/asm/perf_regs.h
arch/*/lib/memcpy*.S
arch/*/lib/memset*.S
include/linux/poison.h
include/linux/magic.h
include/linux/hw_breakpoint.h
+arch/x86/include/asm/svm.h
+arch/x86/include/asm/vmx.h
+arch/x86/include/asm/kvm_host.h
diff --git a/tools/perf/Makefile b/tools/perf/Makefile
index 35655c3a7b7..e5e71e7d95a 100644
--- a/tools/perf/Makefile
+++ b/tools/perf/Makefile
@@ -37,7 +37,14 @@ include config/utilities.mak
#
# Define NO_NEWT if you do not want TUI support.
#
+# Define NO_GTK2 if you do not want GTK+ GUI support.
+#
# Define NO_DEMANGLE if you do not want C++ symbol demangling.
+#
+# Define NO_LIBELF if you do not want libelf dependency (e.g. cross-builds)
+#
+# Define NO_LIBUNWIND if you do not want libunwind dependency for dwarf
+# backtrace post unwind.
$(OUTPUT)PERF-VERSION-FILE: .FORCE-PERF-VERSION-FILE
@$(SHELL_PATH) util/PERF-VERSION-GEN $(OUTPUT)
@@ -50,16 +57,19 @@ ARCH ?= $(shell echo $(uname_M) | sed -e s/i.86/i386/ -e s/sun4u/sparc64/ \
-e s/s390x/s390/ -e s/parisc64/parisc/ \
-e s/ppc.*/powerpc/ -e s/mips.*/mips/ \
-e s/sh[234].*/sh/ )
+NO_PERF_REGS := 1
CC = $(CROSS_COMPILE)gcc
AR = $(CROSS_COMPILE)ar
# Additional ARCH settings for x86
ifeq ($(ARCH),i386)
- ARCH := x86
+ override ARCH := x86
+ NO_PERF_REGS := 0
+ LIBUNWIND_LIBS = -lunwind -lunwind-x86
endif
ifeq ($(ARCH),x86_64)
- ARCH := x86
+ override ARCH := x86
IS_X86_64 := 0
ifeq (, $(findstring m32,$(EXTRA_CFLAGS)))
IS_X86_64 := $(shell echo __x86_64__ | ${CC} -E -xc - | tail -n 1)
@@ -69,6 +79,8 @@ ifeq ($(ARCH),x86_64)
ARCH_CFLAGS := -DARCH_X86_64
ARCH_INCLUDE = ../../arch/x86/lib/memcpy_64.S ../../arch/x86/lib/memset_64.S
endif
+ NO_PERF_REGS := 0
+ LIBUNWIND_LIBS = -lunwind -lunwind-x86_64
endif
# Treat warnings as errors unless directed not to
@@ -89,7 +101,7 @@ ifdef PARSER_DEBUG
PARSER_DEBUG_CFLAGS := -DPARSER_DEBUG
endif
-CFLAGS = -fno-omit-frame-pointer -ggdb3 -Wall -Wextra -std=gnu99 $(CFLAGS_WERROR) $(CFLAGS_OPTIMIZE) $(EXTRA_WARNINGS) $(EXTRA_CFLAGS) $(PARSER_DEBUG_CFLAGS)
+CFLAGS = -fno-omit-frame-pointer -ggdb3 -funwind-tables -Wall -Wextra -std=gnu99 $(CFLAGS_WERROR) $(CFLAGS_OPTIMIZE) $(EXTRA_WARNINGS) $(EXTRA_CFLAGS) $(PARSER_DEBUG_CFLAGS)
EXTLIBS = -lpthread -lrt -lelf -lm
ALL_CFLAGS = $(CFLAGS) -D_LARGEFILE64_SOURCE -D_FILE_OFFSET_BITS=64 -D_GNU_SOURCE
ALL_LDFLAGS = $(LDFLAGS)
@@ -186,10 +198,10 @@ SCRIPTS = $(patsubst %.sh,%,$(SCRIPT_SH))
TRACE_EVENT_DIR = ../lib/traceevent/
-ifeq ("$(origin O)", "command line")
- TE_PATH=$(OUTPUT)/
+ifneq ($(OUTPUT),)
+ TE_PATH=$(OUTPUT)
else
- TE_PATH=$(TRACE_EVENT_DIR)/
+ TE_PATH=$(TRACE_EVENT_DIR)
endif
LIBTRACEEVENT = $(TE_PATH)libtraceevent.a
@@ -221,13 +233,13 @@ export PERL_PATH
FLEX = flex
BISON= bison
-$(OUTPUT)util/parse-events-flex.c: util/parse-events.l
+$(OUTPUT)util/parse-events-flex.c: util/parse-events.l $(OUTPUT)util/parse-events-bison.c
$(QUIET_FLEX)$(FLEX) --header-file=$(OUTPUT)util/parse-events-flex.h $(PARSER_DEBUG_FLEX) -t util/parse-events.l > $(OUTPUT)util/parse-events-flex.c
$(OUTPUT)util/parse-events-bison.c: util/parse-events.y
$(QUIET_BISON)$(BISON) -v util/parse-events.y -d $(PARSER_DEBUG_BISON) -o $(OUTPUT)util/parse-events-bison.c
-$(OUTPUT)util/pmu-flex.c: util/pmu.l
+$(OUTPUT)util/pmu-flex.c: util/pmu.l $(OUTPUT)util/pmu-bison.c
$(QUIET_FLEX)$(FLEX) --header-file=$(OUTPUT)util/pmu-flex.h -t util/pmu.l > $(OUTPUT)util/pmu-flex.c
$(OUTPUT)util/pmu-bison.c: util/pmu.y
@@ -252,6 +264,7 @@ LIB_H += util/include/linux/ctype.h
LIB_H += util/include/linux/kernel.h
LIB_H += util/include/linux/list.h
LIB_H += util/include/linux/export.h
+LIB_H += util/include/linux/magic.h
LIB_H += util/include/linux/poison.h
LIB_H += util/include/linux/prefetch.h
LIB_H += util/include/linux/rbtree.h
@@ -321,6 +334,10 @@ LIB_H += $(TRACE_EVENT_DIR)event-parse.h
LIB_H += util/target.h
LIB_H += util/rblist.h
LIB_H += util/intlist.h
+LIB_H += util/perf_regs.h
+LIB_H += util/unwind.h
+LIB_H += ui/helpline.h
+LIB_H += util/vdso.h
LIB_OBJS += $(OUTPUT)util/abspath.o
LIB_OBJS += $(OUTPUT)util/alias.o
@@ -356,6 +373,7 @@ LIB_OBJS += $(OUTPUT)util/usage.o
LIB_OBJS += $(OUTPUT)util/wrapper.o
LIB_OBJS += $(OUTPUT)util/sigchain.o
LIB_OBJS += $(OUTPUT)util/symbol.o
+LIB_OBJS += $(OUTPUT)util/symbol-elf.o
LIB_OBJS += $(OUTPUT)util/dso-test-data.o
LIB_OBJS += $(OUTPUT)util/color.o
LIB_OBJS += $(OUTPUT)util/pager.o
@@ -387,11 +405,15 @@ LIB_OBJS += $(OUTPUT)util/cgroup.o
LIB_OBJS += $(OUTPUT)util/target.o
LIB_OBJS += $(OUTPUT)util/rblist.o
LIB_OBJS += $(OUTPUT)util/intlist.o
+LIB_OBJS += $(OUTPUT)util/vdso.o
+LIB_OBJS += $(OUTPUT)util/stat.o
-BUILTIN_OBJS += $(OUTPUT)builtin-annotate.o
+LIB_OBJS += $(OUTPUT)ui/helpline.o
+LIB_OBJS += $(OUTPUT)ui/hist.o
+LIB_OBJS += $(OUTPUT)ui/stdio/hist.o
+BUILTIN_OBJS += $(OUTPUT)builtin-annotate.o
BUILTIN_OBJS += $(OUTPUT)builtin-bench.o
-
# Benchmark modules
BUILTIN_OBJS += $(OUTPUT)bench/sched-messaging.o
BUILTIN_OBJS += $(OUTPUT)bench/sched-pipe.o
@@ -449,34 +471,73 @@ PYRF_OBJS += $(OUTPUT)util/xyarray.o
-include config.mak.autogen
-include config.mak
-ifndef NO_DWARF
-FLAGS_DWARF=$(ALL_CFLAGS) -ldw -lelf $(ALL_LDFLAGS) $(EXTLIBS)
-ifneq ($(call try-cc,$(SOURCE_DWARF),$(FLAGS_DWARF)),y)
- msg := $(warning No libdw.h found or old libdw.h found or elfutils is older than 0.138, disables dwarf support. Please install new elfutils-devel/libdw-dev);
+ifdef NO_LIBELF
NO_DWARF := 1
-endif # Dwarf support
-endif # NO_DWARF
-
--include arch/$(ARCH)/Makefile
-
-ifneq ($(OUTPUT),)
- BASIC_CFLAGS += -I$(OUTPUT)
-endif
-
+ NO_DEMANGLE := 1
+ NO_LIBUNWIND := 1
+else
FLAGS_LIBELF=$(ALL_CFLAGS) $(ALL_LDFLAGS) $(EXTLIBS)
ifneq ($(call try-cc,$(SOURCE_LIBELF),$(FLAGS_LIBELF)),y)
FLAGS_GLIBC=$(ALL_CFLAGS) $(ALL_LDFLAGS)
ifneq ($(call try-cc,$(SOURCE_GLIBC),$(FLAGS_GLIBC)),y)
msg := $(error No gnu/libc-version.h found, please install glibc-dev[el]/glibc-static);
else
- msg := $(error No libelf.h/libelf found, please install libelf-dev/elfutils-libelf-devel);
+ NO_LIBELF := 1
+ NO_DWARF := 1
+ NO_DEMANGLE := 1
endif
endif
+endif # NO_LIBELF
+
+ifndef NO_LIBUNWIND
+# for linking with debug library, run like:
+# make DEBUG=1 LIBUNWIND_DIR=/opt/libunwind/
+ifdef LIBUNWIND_DIR
+ LIBUNWIND_CFLAGS := -I$(LIBUNWIND_DIR)/include
+ LIBUNWIND_LDFLAGS := -L$(LIBUNWIND_DIR)/lib
+endif
+
+FLAGS_UNWIND=$(LIBUNWIND_CFLAGS) $(ALL_CFLAGS) $(LIBUNWIND_LDFLAGS) $(ALL_LDFLAGS) $(EXTLIBS) $(LIBUNWIND_LIBS)
+ifneq ($(call try-cc,$(SOURCE_LIBUNWIND),$(FLAGS_UNWIND)),y)
+ msg := $(warning No libunwind found, disabling post unwind support. Please install libunwind-dev[el] >= 0.99);
+ NO_LIBUNWIND := 1
+endif # Libunwind support
+endif # NO_LIBUNWIND
+
+-include arch/$(ARCH)/Makefile
+
+ifneq ($(OUTPUT),)
+ BASIC_CFLAGS += -I$(OUTPUT)
+endif
+
+ifdef NO_LIBELF
+BASIC_CFLAGS += -DNO_LIBELF_SUPPORT
+
+EXTLIBS := $(filter-out -lelf,$(EXTLIBS))
+
+# Remove ELF/DWARF dependent codes
+LIB_OBJS := $(filter-out $(OUTPUT)util/symbol-elf.o,$(LIB_OBJS))
+LIB_OBJS := $(filter-out $(OUTPUT)util/dwarf-aux.o,$(LIB_OBJS))
+LIB_OBJS := $(filter-out $(OUTPUT)util/probe-event.o,$(LIB_OBJS))
+LIB_OBJS := $(filter-out $(OUTPUT)util/probe-finder.o,$(LIB_OBJS))
+
+BUILTIN_OBJS := $(filter-out $(OUTPUT)builtin-probe.o,$(BUILTIN_OBJS))
+
+# Use minimal symbol handling
+LIB_OBJS += $(OUTPUT)util/symbol-minimal.o
+
+else # NO_LIBELF
ifneq ($(call try-cc,$(SOURCE_ELF_MMAP),$(FLAGS_COMMON)),y)
BASIC_CFLAGS += -DLIBELF_NO_MMAP
endif
+FLAGS_DWARF=$(ALL_CFLAGS) -ldw -lelf $(ALL_LDFLAGS) $(EXTLIBS)
+ifneq ($(call try-cc,$(SOURCE_DWARF),$(FLAGS_DWARF)),y)
+ msg := $(warning No libdw.h found or old libdw.h found or elfutils is older than 0.138, disables dwarf support. Please install new elfutils-devel/libdw-dev);
+ NO_DWARF := 1
+endif # Dwarf support
+
ifndef NO_DWARF
ifeq ($(origin PERF_HAVE_DWARF_REGS), undefined)
msg := $(warning DWARF register mappings have not been defined for architecture $(ARCH), DWARF support disabled);
@@ -487,6 +548,29 @@ else
LIB_OBJS += $(OUTPUT)util/dwarf-aux.o
endif # PERF_HAVE_DWARF_REGS
endif # NO_DWARF
+endif # NO_LIBELF
+
+ifdef NO_LIBUNWIND
+ BASIC_CFLAGS += -DNO_LIBUNWIND_SUPPORT
+else
+ EXTLIBS += $(LIBUNWIND_LIBS)
+ BASIC_CFLAGS := $(LIBUNWIND_CFLAGS) $(BASIC_CFLAGS)
+ BASIC_LDFLAGS := $(LIBUNWIND_LDFLAGS) $(BASIC_LDFLAGS)
+ LIB_OBJS += $(OUTPUT)util/unwind.o
+endif
+
+ifdef NO_LIBAUDIT
+ BASIC_CFLAGS += -DNO_LIBAUDIT_SUPPORT
+else
+ FLAGS_LIBAUDIT = $(ALL_CFLAGS) $(ALL_LDFLAGS) -laudit
+ ifneq ($(call try-cc,$(SOURCE_LIBAUDIT),$(FLAGS_LIBAUDIT)),y)
+ msg := $(warning No libaudit.h found, disables 'trace' tool, please install audit-libs-devel or libaudit-dev);
+ BASIC_CFLAGS += -DNO_LIBAUDIT_SUPPORT
+ else
+ BUILTIN_OBJS += $(OUTPUT)builtin-trace.o
+ EXTLIBS += -laudit
+ endif
+endif
ifdef NO_NEWT
BASIC_CFLAGS += -DNO_NEWT_SUPPORT
@@ -504,14 +588,13 @@ else
LIB_OBJS += $(OUTPUT)ui/browsers/annotate.o
LIB_OBJS += $(OUTPUT)ui/browsers/hists.o
LIB_OBJS += $(OUTPUT)ui/browsers/map.o
- LIB_OBJS += $(OUTPUT)ui/helpline.o
LIB_OBJS += $(OUTPUT)ui/progress.o
LIB_OBJS += $(OUTPUT)ui/util.o
LIB_OBJS += $(OUTPUT)ui/tui/setup.o
LIB_OBJS += $(OUTPUT)ui/tui/util.o
+ LIB_OBJS += $(OUTPUT)ui/tui/helpline.o
LIB_H += ui/browser.h
LIB_H += ui/browsers/map.h
- LIB_H += ui/helpline.h
LIB_H += ui/keysyms.h
LIB_H += ui/libslang.h
LIB_H += ui/progress.h
@@ -523,7 +606,7 @@ endif
ifdef NO_GTK2
BASIC_CFLAGS += -DNO_GTK2_SUPPORT
else
- FLAGS_GTK2=$(ALL_CFLAGS) $(ALL_LDFLAGS) $(EXTLIBS) $(shell pkg-config --libs --cflags gtk+-2.0)
+ FLAGS_GTK2=$(ALL_CFLAGS) $(ALL_LDFLAGS) $(EXTLIBS) $(shell pkg-config --libs --cflags gtk+-2.0 2>/dev/null)
ifneq ($(call try-cc,$(SOURCE_GTK2),$(FLAGS_GTK2)),y)
msg := $(warning GTK2 not found, disables GTK2 support. Please install gtk2-devel or libgtk2.0-dev);
BASIC_CFLAGS += -DNO_GTK2_SUPPORT
@@ -531,11 +614,12 @@ else
ifeq ($(call try-cc,$(SOURCE_GTK2_INFOBAR),$(FLAGS_GTK2)),y)
BASIC_CFLAGS += -DHAVE_GTK_INFO_BAR
endif
- BASIC_CFLAGS += $(shell pkg-config --cflags gtk+-2.0)
- EXTLIBS += $(shell pkg-config --libs gtk+-2.0)
+ BASIC_CFLAGS += $(shell pkg-config --cflags gtk+-2.0 2>/dev/null)
+ EXTLIBS += $(shell pkg-config --libs gtk+-2.0 2>/dev/null)
LIB_OBJS += $(OUTPUT)ui/gtk/browser.o
LIB_OBJS += $(OUTPUT)ui/gtk/setup.o
LIB_OBJS += $(OUTPUT)ui/gtk/util.o
+ LIB_OBJS += $(OUTPUT)ui/gtk/helpline.o
# Make sure that it'd be included only once.
ifneq ($(findstring -DNO_NEWT_SUPPORT,$(BASIC_CFLAGS)),)
LIB_OBJS += $(OUTPUT)ui/setup.o
@@ -644,7 +728,7 @@ else
EXTLIBS += -liberty
BASIC_CFLAGS += -DHAVE_CPLUS_DEMANGLE
else
- FLAGS_BFD=$(ALL_CFLAGS) $(ALL_LDFLAGS) $(EXTLIBS) -lbfd
+ FLAGS_BFD=$(ALL_CFLAGS) $(ALL_LDFLAGS) $(EXTLIBS) -DPACKAGE='perf' -lbfd
has_bfd := $(call try-cc,$(SOURCE_BFD),$(FLAGS_BFD))
ifeq ($(has_bfd),y)
EXTLIBS += -lbfd
@@ -674,6 +758,13 @@ else
endif
endif
+ifeq ($(NO_PERF_REGS),0)
+ ifeq ($(ARCH),x86)
+ LIB_H += arch/x86/include/perf_regs.h
+ endif
+else
+ BASIC_CFLAGS += -DNO_PERF_REGS
+endif
ifdef NO_STRLCPY
BASIC_CFLAGS += -DNO_STRLCPY
@@ -683,6 +774,14 @@ else
endif
endif
+ifdef NO_BACKTRACE
+ BASIC_CFLAGS += -DNO_BACKTRACE
+else
+ ifneq ($(call try-cc,$(SOURCE_BACKTRACE),),y)
+ BASIC_CFLAGS += -DNO_BACKTRACE
+ endif
+endif
+
ifdef ASCIIDOC8
export ASCIIDOC8
endif
@@ -700,6 +799,7 @@ perfexecdir_SQ = $(subst ','\'',$(perfexecdir))
template_dir_SQ = $(subst ','\'',$(template_dir))
htmldir_SQ = $(subst ','\'',$(htmldir))
prefix_SQ = $(subst ','\'',$(prefix))
+sysconfdir_SQ = $(subst ','\'',$(sysconfdir))
SHELL_PATH_SQ = $(subst ','\'',$(SHELL_PATH))
@@ -767,10 +867,10 @@ $(OUTPUT)perf.o perf.spec \
# over the general rule for .o
$(OUTPUT)util/%-flex.o: $(OUTPUT)util/%-flex.c $(OUTPUT)PERF-CFLAGS
- $(QUIET_CC)$(CC) -o $@ -c $(ALL_CFLAGS) -Iutil/ -w $<
+ $(QUIET_CC)$(CC) -o $@ -c -Iutil/ $(ALL_CFLAGS) -w $<
$(OUTPUT)util/%-bison.o: $(OUTPUT)util/%-bison.c $(OUTPUT)PERF-CFLAGS
- $(QUIET_CC)$(CC) -o $@ -c $(ALL_CFLAGS) -DYYENABLE_NLS=0 -DYYLTYPE_IS_TRIVIAL=0 -Iutil/ -w $<
+ $(QUIET_CC)$(CC) -o $@ -c -Iutil/ $(ALL_CFLAGS) -DYYENABLE_NLS=0 -DYYLTYPE_IS_TRIVIAL=0 -w $<
$(OUTPUT)%.o: %.c $(OUTPUT)PERF-CFLAGS
$(QUIET_CC)$(CC) -o $@ -c $(ALL_CFLAGS) $<
@@ -842,7 +942,10 @@ $(LIB_FILE): $(LIB_OBJS)
# libtraceevent.a
$(LIBTRACEEVENT):
- $(QUIET_SUBDIR0)$(TRACE_EVENT_DIR) $(QUIET_SUBDIR1) $(COMMAND_O) libtraceevent.a
+ $(QUIET_SUBDIR0)$(TRACE_EVENT_DIR) $(QUIET_SUBDIR1) O=$(OUTPUT) libtraceevent.a
+
+$(LIBTRACEEVENT)-clean:
+ $(QUIET_SUBDIR0)$(TRACE_EVENT_DIR) $(QUIET_SUBDIR1) O=$(OUTPUT) clean
help:
@echo 'Perf make targets:'
@@ -951,6 +1054,8 @@ install: all
$(INSTALL) scripts/python/Perf-Trace-Util/lib/Perf/Trace/* -t '$(DESTDIR_SQ)$(perfexec_instdir_SQ)/scripts/python/Perf-Trace-Util/lib/Perf/Trace'
$(INSTALL) scripts/python/*.py -t '$(DESTDIR_SQ)$(perfexec_instdir_SQ)/scripts/python'
$(INSTALL) scripts/python/bin/* -t '$(DESTDIR_SQ)$(perfexec_instdir_SQ)/scripts/python/bin'
+ $(INSTALL) -d -m 755 '$(DESTDIR_SQ)$(sysconfdir_SQ)/bash_completion.d'
+ $(INSTALL) bash_completion '$(DESTDIR_SQ)$(sysconfdir_SQ)/bash_completion.d/perf'
install-python_ext:
$(PYTHON_WORD) util/setup.py --quiet install --root='/$(DESTDIR_SQ)'
@@ -981,7 +1086,7 @@ quick-install-html:
### Cleaning rules
-clean:
+clean: $(LIBTRACEEVENT)-clean
$(RM) $(LIB_OBJS) $(BUILTIN_OBJS) $(LIB_FILE) $(OUTPUT)perf-archive $(OUTPUT)perf.o $(LANG_BINDINGS)
$(RM) $(ALL_PROGRAMS) perf
$(RM) *.spec *.pyc *.pyo */*.pyc */*.pyo $(OUTPUT)common-cmds.h TAGS tags cscope*
diff --git a/tools/perf/arch/x86/Makefile b/tools/perf/arch/x86/Makefile
index 744e629797b..815841c04eb 100644
--- a/tools/perf/arch/x86/Makefile
+++ b/tools/perf/arch/x86/Makefile
@@ -2,4 +2,7 @@ ifndef NO_DWARF
PERF_HAVE_DWARF_REGS := 1
LIB_OBJS += $(OUTPUT)arch/$(ARCH)/util/dwarf-regs.o
endif
+ifndef NO_LIBUNWIND
+LIB_OBJS += $(OUTPUT)arch/$(ARCH)/util/unwind.o
+endif
LIB_OBJS += $(OUTPUT)arch/$(ARCH)/util/header.o
diff --git a/tools/perf/arch/x86/include/perf_regs.h b/tools/perf/arch/x86/include/perf_regs.h
new file mode 100644
index 00000000000..46fc9f15c6b
--- /dev/null
+++ b/tools/perf/arch/x86/include/perf_regs.h
@@ -0,0 +1,80 @@
+#ifndef ARCH_PERF_REGS_H
+#define ARCH_PERF_REGS_H
+
+#include <stdlib.h>
+#include "../../util/types.h"
+#include "../../../../../arch/x86/include/asm/perf_regs.h"
+
+#ifndef ARCH_X86_64
+#define PERF_REGS_MASK ((1ULL << PERF_REG_X86_32_MAX) - 1)
+#else
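+/*
+ * The 64-bit kernel's pt_regs does not carry the DS/ES/FS/GS segment
+ * registers, so they cannot be sampled; mask them out of the default set.
+ */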
+#define REG_NOSUPPORT ((1ULL << PERF_REG_X86_DS) | \
+ (1ULL << PERF_REG_X86_ES) | \
+ (1ULL << PERF_REG_X86_FS) | \
+ (1ULL << PERF_REG_X86_GS))
+#define PERF_REGS_MASK (((1ULL << PERF_REG_X86_64_MAX) - 1) & ~REG_NOSUPPORT)
+#endif
+#define PERF_REG_IP PERF_REG_X86_IP
+#define PERF_REG_SP PERF_REG_X86_SP
+
+static inline const char *perf_reg_name(int id)
+{
+ switch (id) {
+ case PERF_REG_X86_AX:
+ return "AX";
+ case PERF_REG_X86_BX:
+ return "BX";
+ case PERF_REG_X86_CX:
+ return "CX";
+ case PERF_REG_X86_DX:
+ return "DX";
+ case PERF_REG_X86_SI:
+ return "SI";
+ case PERF_REG_X86_DI:
+ return "DI";
+ case PERF_REG_X86_BP:
+ return "BP";
+ case PERF_REG_X86_SP:
+ return "SP";
+ case PERF_REG_X86_IP:
+ return "IP";
+ case PERF_REG_X86_FLAGS:
+ return "FLAGS";
+ case PERF_REG_X86_CS:
+ return "CS";
+ case PERF_REG_X86_SS:
+ return "SS";
+ case PERF_REG_X86_DS:
+ return "DS";
+ case PERF_REG_X86_ES:
+ return "ES";
+ case PERF_REG_X86_FS:
+ return "FS";
+ case PERF_REG_X86_GS:
+ return "GS";
+#ifdef ARCH_X86_64
+ case PERF_REG_X86_R8:
+ return "R8";
+ case PERF_REG_X86_R9:
+ return "R9";
+ case PERF_REG_X86_R10:
+ return "R10";
+ case PERF_REG_X86_R11:
+ return "R11";
+ case PERF_REG_X86_R12:
+ return "R12";
+ case PERF_REG_X86_R13:
+ return "R13";
+ case PERF_REG_X86_R14:
+ return "R14";
+ case PERF_REG_X86_R15:
+ return "R15";
+#endif /* ARCH_X86_64 */
+ default:
+ return NULL;
+ }
+
+ return NULL;
+}
+
+#endif /* ARCH_PERF_REGS_H */
diff --git a/tools/perf/arch/x86/util/unwind.c b/tools/perf/arch/x86/util/unwind.c
new file mode 100644
index 00000000000..78d956eff96
--- /dev/null
+++ b/tools/perf/arch/x86/util/unwind.c
@@ -0,0 +1,111 @@
+
+#include <errno.h>
+#include <libunwind.h>
+#include "perf_regs.h"
+#include "../../util/unwind.h"
+
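+/*
+ * Map libunwind's x86 register numbers onto the PERF_REG_X86_* indices
+ * used for the register block recorded with each sample.
+ */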
+#ifdef ARCH_X86_64
+int unwind__arch_reg_id(int regnum)
+{
+ int id;
+
+ switch (regnum) {
+ case UNW_X86_64_RAX:
+ id = PERF_REG_X86_AX;
+ break;
+ case UNW_X86_64_RDX:
+ id = PERF_REG_X86_DX;
+ break;
+ case UNW_X86_64_RCX:
+ id = PERF_REG_X86_CX;
+ break;
+ case UNW_X86_64_RBX:
+ id = PERF_REG_X86_BX;
+ break;
+ case UNW_X86_64_RSI:
+ id = PERF_REG_X86_SI;
+ break;
+ case UNW_X86_64_RDI:
+ id = PERF_REG_X86_DI;
+ break;
+ case UNW_X86_64_RBP:
+ id = PERF_REG_X86_BP;
+ break;
+ case UNW_X86_64_RSP:
+ id = PERF_REG_X86_SP;
+ break;
+ case UNW_X86_64_R8:
+ id = PERF_REG_X86_R8;
+ break;
+ case UNW_X86_64_R9:
+ id = PERF_REG_X86_R9;
+ break;
+ case UNW_X86_64_R10:
+ id = PERF_REG_X86_R10;
+ break;
+ case UNW_X86_64_R11:
+ id = PERF_REG_X86_R11;
+ break;
+ case UNW_X86_64_R12:
+ id = PERF_REG_X86_R12;
+ break;
+ case UNW_X86_64_R13:
+ id = PERF_REG_X86_R13;
+ break;
+ case UNW_X86_64_R14:
+ id = PERF_REG_X86_R14;
+ break;
+ case UNW_X86_64_R15:
+ id = PERF_REG_X86_R15;
+ break;
+ case UNW_X86_64_RIP:
+ id = PERF_REG_X86_IP;
+ break;
+ default:
+ pr_err("unwind: invalid reg id %d\n", regnum);
+ return -EINVAL;
+ }
+
+ return id;
+}
+#else
+int unwind__arch_reg_id(int regnum)
+{
+ int id;
+
+ switch (regnum) {
+ case UNW_X86_EAX:
+ id = PERF_REG_X86_AX;
+ break;
+ case UNW_X86_EDX:
+ id = PERF_REG_X86_DX;
+ break;
+ case UNW_X86_ECX:
+ id = PERF_REG_X86_CX;
+ break;
+ case UNW_X86_EBX:
+ id = PERF_REG_X86_BX;
+ break;
+ case UNW_X86_ESI:
+ id = PERF_REG_X86_SI;
+ break;
+ case UNW_X86_EDI:
+ id = PERF_REG_X86_DI;
+ break;
+ case UNW_X86_EBP:
+ id = PERF_REG_X86_BP;
+ break;
+ case UNW_X86_ESP:
+ id = PERF_REG_X86_SP;
+ break;
+ case UNW_X86_EIP:
+ id = PERF_REG_X86_IP;
+ break;
+ default:
+ pr_err("unwind: invalid reg id %d\n", regnum);
+ return -EINVAL;
+ }
+
+ return id;
+}
+#endif /* ARCH_X86_64 */
diff --git a/tools/perf/bash_completion b/tools/perf/bash_completion
new file mode 100644
index 00000000000..1958fa539d0
--- /dev/null
+++ b/tools/perf/bash_completion
@@ -0,0 +1,26 @@
+# perf completion
+
+have perf &&
+_perf()
+{
+ local cur prev cmd cmds
+
+ COMPREPLY=()
+ _get_comp_words_by_ref cur prev
+
+ cmd=${COMP_WORDS[0]}
+
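+ # COMP_CWORD is the index of the word being completed;
+ # index 1 means the subcommand itself is still being typed.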
+ # List perf subcommands
+ if [ $COMP_CWORD -eq 1 ]; then
+ cmds=$($cmd --list-cmds)
+ COMPREPLY=( $( compgen -W '$cmds' -- "$cur" ) )
+ # List possible events for -e option
+ elif [[ $prev == "-e" && "${COMP_WORDS[1]}" == @(record|stat|top) ]]; then
+ cmds=$($cmd list --raw-dump)
+ COMPREPLY=( $( compgen -W '$cmds' -- "$cur" ) )
+ # Fall back to listing regular files
+ else
+ _filedir
+ fi
+} &&
+complete -F _perf perf
diff --git a/tools/perf/bench/bench.h b/tools/perf/bench/bench.h
index a09bece6dad..8f89998eeaf 100644
--- a/tools/perf/bench/bench.h
+++ b/tools/perf/bench/bench.h
@@ -3,7 +3,8 @@
extern int bench_sched_messaging(int argc, const char **argv, const char *prefix);
extern int bench_sched_pipe(int argc, const char **argv, const char *prefix);
-extern int bench_mem_memcpy(int argc, const char **argv, const char *prefix __used);
+extern int bench_mem_memcpy(int argc, const char **argv,
+ const char *prefix __maybe_unused);
extern int bench_mem_memset(int argc, const char **argv, const char *prefix);
#define BENCH_FORMAT_DEFAULT_STR "default"
diff --git a/tools/perf/bench/mem-memcpy.c b/tools/perf/bench/mem-memcpy.c
index 02dad5d3359..93c83e3cb4a 100644
--- a/tools/perf/bench/mem-memcpy.c
+++ b/tools/perf/bench/mem-memcpy.c
@@ -177,7 +177,7 @@ static double do_memcpy_gettimeofday(memcpy_t fn, size_t len, bool prefault)
} while (0)
int bench_mem_memcpy(int argc, const char **argv,
- const char *prefix __used)
+ const char *prefix __maybe_unused)
{
int i;
size_t len;
diff --git a/tools/perf/bench/mem-memset.c b/tools/perf/bench/mem-memset.c
index 350cc955726..c6e4bc52349 100644
--- a/tools/perf/bench/mem-memset.c
+++ b/tools/perf/bench/mem-memset.c
@@ -171,7 +171,7 @@ static double do_memset_gettimeofday(memset_t fn, size_t len, bool prefault)
} while (0)
int bench_mem_memset(int argc, const char **argv,
- const char *prefix __used)
+ const char *prefix __maybe_unused)
{
int i;
size_t len;
diff --git a/tools/perf/bench/sched-messaging.c b/tools/perf/bench/sched-messaging.c
index d1d1b30f99c..cc1190a0849 100644
--- a/tools/perf/bench/sched-messaging.c
+++ b/tools/perf/bench/sched-messaging.c
@@ -267,7 +267,7 @@ static const char * const bench_sched_message_usage[] = {
};
int bench_sched_messaging(int argc, const char **argv,
- const char *prefix __used)
+ const char *prefix __maybe_unused)
{
unsigned int i, total_children;
struct timeval start, stop, diff;
diff --git a/tools/perf/bench/sched-pipe.c b/tools/perf/bench/sched-pipe.c
index 0c7454f8b8a..69cfba8d4c6 100644
--- a/tools/perf/bench/sched-pipe.c
+++ b/tools/perf/bench/sched-pipe.c
@@ -43,7 +43,7 @@ static const char * const bench_sched_pipe_usage[] = {
};
int bench_sched_pipe(int argc, const char **argv,
- const char *prefix __used)
+ const char *prefix __maybe_unused)
{
int pipe_1[2], pipe_2[2];
int m = 0, i;
@@ -55,14 +55,14 @@ int bench_sched_pipe(int argc, const char **argv,
* discarding returned value of read(), write()
* causes error in building environment for perf
*/
- int __used ret, wait_stat;
- pid_t pid, retpid;
+ int __maybe_unused ret, wait_stat;
+ pid_t pid, retpid __maybe_unused;
argc = parse_options(argc, argv, options,
bench_sched_pipe_usage, 0);
- assert(!pipe(pipe_1));
- assert(!pipe(pipe_2));
+ BUG_ON(pipe(pipe_1));
+ BUG_ON(pipe(pipe_2));
pid = fork();
assert(pid >= 0);
diff --git a/tools/perf/builtin-annotate.c b/tools/perf/builtin-annotate.c
index 67522cf8740..9ea38540b87 100644
--- a/tools/perf/builtin-annotate.c
+++ b/tools/perf/builtin-annotate.c
@@ -239,7 +239,7 @@ static const char * const annotate_usage[] = {
NULL
};
-int cmd_annotate(int argc, const char **argv, const char *prefix __used)
+int cmd_annotate(int argc, const char **argv, const char *prefix __maybe_unused)
{
struct perf_annotate annotate = {
.tool = {
@@ -282,6 +282,8 @@ int cmd_annotate(int argc, const char **argv, const char *prefix __used)
"Display raw encoding of assembly instructions (default)"),
OPT_STRING('M', "disassembler-style", &disassembler_style, "disassembler style",
"Specify disassembler style (e.g. -M intel for intel syntax)"),
+ OPT_STRING(0, "objdump", &objdump_path, "path",
+ "objdump binary to use for disassembly and annotations"),
OPT_END()
};
diff --git a/tools/perf/builtin-bench.c b/tools/perf/builtin-bench.c
index 1f310021644..cae9a5fd2ec 100644
--- a/tools/perf/builtin-bench.c
+++ b/tools/perf/builtin-bench.c
@@ -173,7 +173,7 @@ static void all_subsystem(void)
all_suite(&subsystems[i]);
}
-int cmd_bench(int argc, const char **argv, const char *prefix __used)
+int cmd_bench(int argc, const char **argv, const char *prefix __maybe_unused)
{
int i, j, status = 0;
diff --git a/tools/perf/builtin-buildid-cache.c b/tools/perf/builtin-buildid-cache.c
index 29ad20e6791..83654557e10 100644
--- a/tools/perf/builtin-buildid-cache.c
+++ b/tools/perf/builtin-buildid-cache.c
@@ -43,15 +43,16 @@ static int build_id_cache__add_file(const char *filename, const char *debugdir)
}
build_id__sprintf(build_id, sizeof(build_id), sbuild_id);
- err = build_id_cache__add_s(sbuild_id, debugdir, filename, false);
+ err = build_id_cache__add_s(sbuild_id, debugdir, filename,
+ false, false);
if (verbose)
pr_info("Adding %s %s: %s\n", sbuild_id, filename,
err ? "FAIL" : "Ok");
return err;
}
-static int build_id_cache__remove_file(const char *filename __used,
- const char *debugdir __used)
+static int build_id_cache__remove_file(const char *filename __maybe_unused,
+ const char *debugdir __maybe_unused)
{
u8 build_id[BUILD_ID_SIZE];
char sbuild_id[BUILD_ID_SIZE * 2 + 1];
@@ -119,7 +120,8 @@ static int __cmd_buildid_cache(void)
return 0;
}
-int cmd_buildid_cache(int argc, const char **argv, const char *prefix __used)
+int cmd_buildid_cache(int argc, const char **argv,
+ const char *prefix __maybe_unused)
{
argc = parse_options(argc, argv, buildid_cache_options,
buildid_cache_usage, 0);
diff --git a/tools/perf/builtin-buildid-list.c b/tools/perf/builtin-buildid-list.c
index 6b2bcfbde15..1159feeebb1 100644
--- a/tools/perf/builtin-buildid-list.c
+++ b/tools/perf/builtin-buildid-list.c
@@ -16,8 +16,6 @@
#include "util/session.h"
#include "util/symbol.h"
-#include <libelf.h>
-
static const char *input_name;
static bool force;
static bool show_kernel;
@@ -71,7 +69,7 @@ static int perf_session__list_build_ids(void)
{
struct perf_session *session;
- elf_version(EV_CURRENT);
+ symbol__elf_init();
session = perf_session__new(input_name, O_RDONLY, force, false,
&build_id__mark_dso_hit_ops);
@@ -105,7 +103,8 @@ static int __cmd_buildid_list(void)
return perf_session__list_build_ids();
}
-int cmd_buildid_list(int argc, const char **argv, const char *prefix __used)
+int cmd_buildid_list(int argc, const char **argv,
+ const char *prefix __maybe_unused)
{
argc = parse_options(argc, argv, options, buildid_list_usage, 0);
setup_pager();
diff --git a/tools/perf/builtin-diff.c b/tools/perf/builtin-diff.c
index d29d350fb2b..761f4197a9e 100644
--- a/tools/perf/builtin-diff.c
+++ b/tools/perf/builtin-diff.c
@@ -10,6 +10,7 @@
#include "util/event.h"
#include "util/hist.h"
#include "util/evsel.h"
+#include "util/evlist.h"
#include "util/session.h"
#include "util/tool.h"
#include "util/sort.h"
@@ -24,11 +25,6 @@ static char diff__default_sort_order[] = "dso,symbol";
static bool force;
static bool show_displacement;
-struct perf_diff {
- struct perf_tool tool;
- struct perf_session *session;
-};
-
static int hists__add_entry(struct hists *self,
struct addr_location *al, u64 period)
{
@@ -37,14 +33,12 @@ static int hists__add_entry(struct hists *self,
return -ENOMEM;
}
-static int diff__process_sample_event(struct perf_tool *tool,
+static int diff__process_sample_event(struct perf_tool *tool __maybe_unused,
union perf_event *event,
struct perf_sample *sample,
- struct perf_evsel *evsel __used,
+ struct perf_evsel *evsel,
struct machine *machine)
{
- struct perf_diff *_diff = container_of(tool, struct perf_diff, tool);
- struct perf_session *session = _diff->session;
struct addr_location al;
if (perf_event__preprocess_sample(event, machine, &al, sample, NULL) < 0) {
@@ -56,26 +50,24 @@ static int diff__process_sample_event(struct perf_tool *tool,
if (al.filtered || al.sym == NULL)
return 0;
- if (hists__add_entry(&session->hists, &al, sample->period)) {
+ if (hists__add_entry(&evsel->hists, &al, sample->period)) {
pr_warning("problem incrementing symbol period, skipping event\n");
return -1;
}
- session->hists.stats.total_period += sample->period;
+ evsel->hists.stats.total_period += sample->period;
return 0;
}
-static struct perf_diff diff = {
- .tool = {
- .sample = diff__process_sample_event,
- .mmap = perf_event__process_mmap,
- .comm = perf_event__process_comm,
- .exit = perf_event__process_task,
- .fork = perf_event__process_task,
- .lost = perf_event__process_lost,
- .ordered_samples = true,
- .ordering_requires_timestamps = true,
- },
+static struct perf_tool tool = {
+ .sample = diff__process_sample_event,
+ .mmap = perf_event__process_mmap,
+ .comm = perf_event__process_comm,
+ .exit = perf_event__process_task,
+ .fork = perf_event__process_task,
+ .lost = perf_event__process_lost,
+ .ordered_samples = true,
+ .ordering_requires_timestamps = true,
};
static void perf_session__insert_hist_entry_by_name(struct rb_root *root,
@@ -146,34 +138,71 @@ static void hists__match(struct hists *older, struct hists *newer)
}
}
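+/*
+ * Find the counterpart of @evsel in the other session's event list so
+ * that hist entries are matched and diffed per event.
+ */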
+static struct perf_evsel *evsel_match(struct perf_evsel *evsel,
+ struct perf_evlist *evlist)
+{
+ struct perf_evsel *e;
+
+ list_for_each_entry(e, &evlist->entries, node)
+ if (perf_evsel__match2(evsel, e))
+ return e;
+
+ return NULL;
+}
+
static int __cmd_diff(void)
{
int ret, i;
#define older (session[0])
#define newer (session[1])
struct perf_session *session[2];
+ struct perf_evlist *evlist_new, *evlist_old;
+ struct perf_evsel *evsel;
+ bool first = true;
older = perf_session__new(input_old, O_RDONLY, force, false,
- &diff.tool);
+ &tool);
newer = perf_session__new(input_new, O_RDONLY, force, false,
- &diff.tool);
+ &tool);
if (session[0] == NULL || session[1] == NULL)
return -ENOMEM;
for (i = 0; i < 2; ++i) {
- diff.session = session[i];
- ret = perf_session__process_events(session[i], &diff.tool);
+ ret = perf_session__process_events(session[i], &tool);
if (ret)
goto out_delete;
- hists__output_resort(&session[i]->hists);
}
- if (show_displacement)
- hists__resort_entries(&older->hists);
+ evlist_old = older->evlist;
+ evlist_new = newer->evlist;
+
+ list_for_each_entry(evsel, &evlist_new->entries, node)
+ hists__output_resort(&evsel->hists);
+
+ list_for_each_entry(evsel, &evlist_old->entries, node) {
+ hists__output_resort(&evsel->hists);
+
+ if (show_displacement)
+ hists__resort_entries(&evsel->hists);
+ }
+
+ list_for_each_entry(evsel, &evlist_new->entries, node) {
+ struct perf_evsel *evsel_old;
+
+ evsel_old = evsel_match(evsel, evlist_old);
+ if (!evsel_old)
+ continue;
+
+ fprintf(stdout, "%s# Event '%s'\n#\n", first ? "" : "\n",
+ perf_evsel__name(evsel));
+
+ first = false;
+
+ hists__match(&evsel_old->hists, &evsel->hists);
+ hists__fprintf(&evsel->hists, &evsel_old->hists,
+ show_displacement, true, 0, 0, stdout);
+ }
- hists__match(&older->hists, &newer->hists);
- hists__fprintf(&newer->hists, &older->hists,
- show_displacement, true, 0, 0, stdout);
out_delete:
for (i = 0; i < 2; ++i)
perf_session__delete(session[i]);
@@ -213,7 +242,7 @@ static const struct option options[] = {
OPT_END()
};
-int cmd_diff(int argc, const char **argv, const char *prefix __used)
+int cmd_diff(int argc, const char **argv, const char *prefix __maybe_unused)
{
sort_order = diff__default_sort_order;
argc = parse_options(argc, argv, options, diff_usage, 0);
@@ -235,6 +264,7 @@ int cmd_diff(int argc, const char **argv, const char *prefix __used)
if (symbol__init() < 0)
return -1;
+ perf_hpp__init(true, show_displacement);
setup_sorting(diff_usage, options);
setup_pager();
diff --git a/tools/perf/builtin-evlist.c b/tools/perf/builtin-evlist.c
index 0dd5a058f76..1fb164164fd 100644
--- a/tools/perf/builtin-evlist.c
+++ b/tools/perf/builtin-evlist.c
@@ -113,7 +113,7 @@ static const char * const evlist_usage[] = {
NULL
};
-int cmd_evlist(int argc, const char **argv, const char *prefix __used)
+int cmd_evlist(int argc, const char **argv, const char *prefix __maybe_unused)
{
struct perf_attr_details details = { .verbose = false, };
const char *input_name = NULL;
diff --git a/tools/perf/builtin-help.c b/tools/perf/builtin-help.c
index 6d5a8a7faf4..25c8b942ff8 100644
--- a/tools/perf/builtin-help.c
+++ b/tools/perf/builtin-help.c
@@ -24,13 +24,14 @@ static struct man_viewer_info_list {
} *man_viewer_info_list;
enum help_format {
+ HELP_FORMAT_NONE,
HELP_FORMAT_MAN,
HELP_FORMAT_INFO,
HELP_FORMAT_WEB,
};
static bool show_all = false;
-static enum help_format help_format = HELP_FORMAT_MAN;
+static enum help_format help_format = HELP_FORMAT_NONE;
static struct option builtin_help_options[] = {
OPT_BOOLEAN('a', "all", &show_all, "print all available commands"),
OPT_SET_UINT('m', "man", &help_format, "show man page", HELP_FORMAT_MAN),
@@ -54,7 +55,9 @@ static enum help_format parse_help_format(const char *format)
return HELP_FORMAT_INFO;
if (!strcmp(format, "web") || !strcmp(format, "html"))
return HELP_FORMAT_WEB;
- die("unrecognized help format '%s'", format);
+
+ pr_err("unrecognized help format '%s'", format);
+ return HELP_FORMAT_NONE;
}
static const char *get_man_viewer_info(const char *name)
@@ -259,6 +262,8 @@ static int perf_help_config(const char *var, const char *value, void *cb)
if (!value)
return config_error_nonbool(var);
help_format = parse_help_format(value);
+ if (help_format == HELP_FORMAT_NONE)
+ return -1;
return 0;
}
if (!strcmp(var, "man.viewer")) {
@@ -352,7 +357,7 @@ static void exec_viewer(const char *name, const char *page)
warning("'%s': unknown man viewer.", name);
}
-static void show_man_page(const char *perf_cmd)
+static int show_man_page(const char *perf_cmd)
{
struct man_viewer_list *viewer;
const char *page = cmd_to_page(perf_cmd);
@@ -365,28 +370,35 @@ static void show_man_page(const char *perf_cmd)
if (fallback)
exec_viewer(fallback, page);
exec_viewer("man", page);
- die("no man viewer handled the request");
+
+ pr_err("no man viewer handled the request");
+ return -1;
}
-static void show_info_page(const char *perf_cmd)
+static int show_info_page(const char *perf_cmd)
{
const char *page = cmd_to_page(perf_cmd);
setenv("INFOPATH", system_path(PERF_INFO_PATH), 1);
execlp("info", "info", "perfman", page, NULL);
+ return -1;
}
-static void get_html_page_path(struct strbuf *page_path, const char *page)
+static int get_html_page_path(struct strbuf *page_path, const char *page)
{
struct stat st;
const char *html_path = system_path(PERF_HTML_PATH);
/* Check that we have a perf documentation directory. */
if (stat(mkpath("%s/perf.html", html_path), &st)
- || !S_ISREG(st.st_mode))
- die("'%s': not a documentation directory.", html_path);
+ || !S_ISREG(st.st_mode)) {
+ pr_err("'%s': not a documentation directory.", html_path);
+ return -1;
+ }
strbuf_init(page_path, 0);
strbuf_addf(page_path, "%s/%s.html", html_path, page);
+
+ return 0;
}
/*
@@ -401,19 +413,23 @@ static void open_html(const char *path)
}
#endif
-static void show_html_page(const char *perf_cmd)
+static int show_html_page(const char *perf_cmd)
{
const char *page = cmd_to_page(perf_cmd);
struct strbuf page_path; /* it leaks but we exec below */
- get_html_page_path(&page_path, page);
+ if (get_html_page_path(&page_path, page) != 0)
+ return -1;
open_html(page_path.buf);
+
+ return 0;
}
-int cmd_help(int argc, const char **argv, const char *prefix __used)
+int cmd_help(int argc, const char **argv, const char *prefix __maybe_unused)
{
const char *alias;
+ int rc = 0;
load_command_list("perf-", &main_cmds, &other_cmds);
@@ -444,16 +460,20 @@ int cmd_help(int argc, const char **argv, const char *prefix __used)
switch (help_format) {
case HELP_FORMAT_MAN:
- show_man_page(argv[0]);
+ rc = show_man_page(argv[0]);
break;
case HELP_FORMAT_INFO:
- show_info_page(argv[0]);
+ rc = show_info_page(argv[0]);
break;
case HELP_FORMAT_WEB:
- show_html_page(argv[0]);
+ rc = show_html_page(argv[0]);
+ break;
+ case HELP_FORMAT_NONE:
+ /* fall-through */
default:
+ rc = -1;
break;
}
- return 0;
+ return rc;
}
diff --git a/tools/perf/builtin-inject.c b/tools/perf/builtin-inject.c
index 3beab489afc..1eaa6617c81 100644
--- a/tools/perf/builtin-inject.c
+++ b/tools/perf/builtin-inject.c
@@ -17,9 +17,9 @@
static char const *input_name = "-";
static bool inject_build_ids;
-static int perf_event__repipe_synth(struct perf_tool *tool __used,
+static int perf_event__repipe_synth(struct perf_tool *tool __maybe_unused,
union perf_event *event,
- struct machine *machine __used)
+ struct machine *machine __maybe_unused)
{
uint32_t size;
void *buf = event;
@@ -40,7 +40,8 @@ static int perf_event__repipe_synth(struct perf_tool *tool __used,
static int perf_event__repipe_op2_synth(struct perf_tool *tool,
union perf_event *event,
- struct perf_session *session __used)
+ struct perf_session *session
+ __maybe_unused)
{
return perf_event__repipe_synth(tool, event, NULL);
}
@@ -52,13 +53,14 @@ static int perf_event__repipe_event_type_synth(struct perf_tool *tool,
}
static int perf_event__repipe_tracing_data_synth(union perf_event *event,
- struct perf_session *session __used)
+ struct perf_session *session
+ __maybe_unused)
{
return perf_event__repipe_synth(NULL, event, NULL);
}
static int perf_event__repipe_attr(union perf_event *event,
- struct perf_evlist **pevlist __used)
+ struct perf_evlist **pevlist __maybe_unused)
{
int ret;
ret = perf_event__process_attr(event, pevlist);
@@ -70,7 +72,7 @@ static int perf_event__repipe_attr(union perf_event *event,
static int perf_event__repipe(struct perf_tool *tool,
union perf_event *event,
- struct perf_sample *sample __used,
+ struct perf_sample *sample __maybe_unused,
struct machine *machine)
{
return perf_event__repipe_synth(tool, event, machine);
@@ -78,8 +80,8 @@ static int perf_event__repipe(struct perf_tool *tool,
static int perf_event__repipe_sample(struct perf_tool *tool,
union perf_event *event,
- struct perf_sample *sample __used,
- struct perf_evsel *evsel __used,
+ struct perf_sample *sample __maybe_unused,
+ struct perf_evsel *evsel __maybe_unused,
struct machine *machine)
{
return perf_event__repipe_synth(tool, event, machine);
@@ -163,7 +165,7 @@ static int dso__inject_build_id(struct dso *self, struct perf_tool *tool,
static int perf_event__inject_buildid(struct perf_tool *tool,
union perf_event *event,
struct perf_sample *sample,
- struct perf_evsel *evsel __used,
+ struct perf_evsel *evsel __maybe_unused,
struct machine *machine)
{
struct addr_location al;
@@ -191,10 +193,13 @@ static int perf_event__inject_buildid(struct perf_tool *tool,
* If this fails, too bad, let the other side
* account this as unresolved.
*/
- } else
+ } else {
+#ifndef NO_LIBELF_SUPPORT
pr_warning("no symbols found in %s, maybe "
"install a debug package?\n",
al.map->dso->long_name);
+#endif
+ }
}
}
@@ -221,7 +226,7 @@ struct perf_tool perf_inject = {
extern volatile int session_done;
-static void sig_handler(int sig __attribute__((__unused__)))
+static void sig_handler(int sig __maybe_unused)
{
session_done = 1;
}
@@ -264,7 +269,7 @@ static const struct option options[] = {
OPT_END()
};
-int cmd_inject(int argc, const char **argv, const char *prefix __used)
+int cmd_inject(int argc, const char **argv, const char *prefix __maybe_unused)
{
argc = parse_options(argc, argv, options, report_usage, 0);
diff --git a/tools/perf/builtin-kmem.c b/tools/perf/builtin-kmem.c
index ce35015f2dc..bc912c68f49 100644
--- a/tools/perf/builtin-kmem.c
+++ b/tools/perf/builtin-kmem.c
@@ -1,6 +1,8 @@
#include "builtin.h"
#include "perf.h"
+#include "util/evlist.h"
+#include "util/evsel.h"
#include "util/util.h"
#include "util/cache.h"
#include "util/symbol.h"
@@ -57,46 +59,52 @@ static unsigned long nr_allocs, nr_cross_allocs;
#define PATH_SYS_NODE "/sys/devices/system/node"
-struct perf_kmem {
- struct perf_tool tool;
- struct perf_session *session;
-};
-
-static void init_cpunode_map(void)
+static int init_cpunode_map(void)
{
FILE *fp;
- int i;
+ int i, err = -1;
fp = fopen("/sys/devices/system/cpu/kernel_max", "r");
if (!fp) {
max_cpu_num = 4096;
- return;
+ return 0;
+ }
+
+ if (fscanf(fp, "%d", &max_cpu_num) < 1) {
+ pr_err("Failed to read 'kernel_max' from sysfs");
+ goto out_close;
}
- if (fscanf(fp, "%d", &max_cpu_num) < 1)
- die("Failed to read 'kernel_max' from sysfs");
max_cpu_num++;
cpunode_map = calloc(max_cpu_num, sizeof(int));
- if (!cpunode_map)
- die("calloc");
+ if (!cpunode_map) {
+ pr_err("%s: calloc failed\n", __func__);
+ goto out_close;
+ }
+
for (i = 0; i < max_cpu_num; i++)
cpunode_map[i] = -1;
+
+ err = 0;
+out_close:
fclose(fp);
+ return err;
}
-static void setup_cpunode_map(void)
+static int setup_cpunode_map(void)
{
struct dirent *dent1, *dent2;
DIR *dir1, *dir2;
unsigned int cpu, mem;
char buf[PATH_MAX];
- init_cpunode_map();
+ if (init_cpunode_map())
+ return -1;
dir1 = opendir(PATH_SYS_NODE);
if (!dir1)
- return;
+ return -1;
while ((dent1 = readdir(dir1)) != NULL) {
if (dent1->d_type != DT_DIR ||
@@ -116,10 +124,11 @@ static void setup_cpunode_map(void)
closedir(dir2);
}
closedir(dir1);
+ return 0;
}
-static void insert_alloc_stat(unsigned long call_site, unsigned long ptr,
- int bytes_req, int bytes_alloc, int cpu)
+static int insert_alloc_stat(unsigned long call_site, unsigned long ptr,
+ int bytes_req, int bytes_alloc, int cpu)
{
struct rb_node **node = &root_alloc_stat.rb_node;
struct rb_node *parent = NULL;
@@ -143,8 +152,10 @@ static void insert_alloc_stat(unsigned long call_site, unsigned long ptr,
data->bytes_alloc += bytes_alloc;
} else {
data = malloc(sizeof(*data));
- if (!data)
- die("malloc");
+ if (!data) {
+ pr_err("%s: malloc failed\n", __func__);
+ return -1;
+ }
data->ptr = ptr;
data->pingpong = 0;
data->hit = 1;
@@ -156,9 +167,10 @@ static void insert_alloc_stat(unsigned long call_site, unsigned long ptr,
}
data->call_site = call_site;
data->alloc_cpu = cpu;
+ return 0;
}
-static void insert_caller_stat(unsigned long call_site,
+static int insert_caller_stat(unsigned long call_site,
int bytes_req, int bytes_alloc)
{
struct rb_node **node = &root_caller_stat.rb_node;
@@ -183,8 +195,10 @@ static void insert_caller_stat(unsigned long call_site,
data->bytes_alloc += bytes_alloc;
} else {
data = malloc(sizeof(*data));
- if (!data)
- die("malloc");
+ if (!data) {
+ pr_err("%s: malloc failed\n", __func__);
+ return -1;
+ }
data->call_site = call_site;
data->pingpong = 0;
data->hit = 1;
@@ -194,39 +208,43 @@ static void insert_caller_stat(unsigned long call_site,
rb_link_node(&data->node, parent, node);
rb_insert_color(&data->node, &root_caller_stat);
}
+
+ return 0;
}
-static void process_alloc_event(void *data,
- struct event_format *event,
- int cpu,
- u64 timestamp __used,
- struct thread *thread __used,
- int node)
+static int perf_evsel__process_alloc_event(struct perf_evsel *evsel,
+ struct perf_sample *sample)
{
- unsigned long call_site;
- unsigned long ptr;
- int bytes_req;
- int bytes_alloc;
- int node1, node2;
-
- ptr = raw_field_value(event, "ptr", data);
- call_site = raw_field_value(event, "call_site", data);
- bytes_req = raw_field_value(event, "bytes_req", data);
- bytes_alloc = raw_field_value(event, "bytes_alloc", data);
+ unsigned long ptr = perf_evsel__intval(evsel, sample, "ptr"),
+ call_site = perf_evsel__intval(evsel, sample, "call_site");
+ int bytes_req = perf_evsel__intval(evsel, sample, "bytes_req"),
+ bytes_alloc = perf_evsel__intval(evsel, sample, "bytes_alloc");
- insert_alloc_stat(call_site, ptr, bytes_req, bytes_alloc, cpu);
- insert_caller_stat(call_site, bytes_req, bytes_alloc);
+ if (insert_alloc_stat(call_site, ptr, bytes_req, bytes_alloc, sample->cpu) ||
+ insert_caller_stat(call_site, bytes_req, bytes_alloc))
+ return -1;
total_requested += bytes_req;
total_allocated += bytes_alloc;
- if (node) {
- node1 = cpunode_map[cpu];
- node2 = raw_field_value(event, "node", data);
+ nr_allocs++;
+ return 0;
+}
+
+static int perf_evsel__process_alloc_node_event(struct perf_evsel *evsel,
+ struct perf_sample *sample)
+{
+ int ret = perf_evsel__process_alloc_event(evsel, sample);
+
+ if (!ret) {
+ int node1 = cpunode_map[sample->cpu],
+ node2 = perf_evsel__intval(evsel, sample, "node");
+
if (node1 != node2)
nr_cross_allocs++;
}
- nr_allocs++;
+
+ return ret;
}
static int ptr_cmp(struct alloc_stat *, struct alloc_stat *);
@@ -257,66 +275,37 @@ static struct alloc_stat *search_alloc_stat(unsigned long ptr,
return NULL;
}
-static void process_free_event(void *data,
- struct event_format *event,
- int cpu,
- u64 timestamp __used,
- struct thread *thread __used)
+static int perf_evsel__process_free_event(struct perf_evsel *evsel,
+ struct perf_sample *sample)
{
- unsigned long ptr;
+ unsigned long ptr = perf_evsel__intval(evsel, sample, "ptr");
struct alloc_stat *s_alloc, *s_caller;
- ptr = raw_field_value(event, "ptr", data);
-
s_alloc = search_alloc_stat(ptr, 0, &root_alloc_stat, ptr_cmp);
if (!s_alloc)
- return;
+ return 0;
- if (cpu != s_alloc->alloc_cpu) {
+ if ((short)sample->cpu != s_alloc->alloc_cpu) {
s_alloc->pingpong++;
s_caller = search_alloc_stat(0, s_alloc->call_site,
&root_caller_stat, callsite_cmp);
- assert(s_caller);
+ if (!s_caller)
+ return -1;
s_caller->pingpong++;
}
s_alloc->alloc_cpu = -1;
-}
-static void process_raw_event(struct perf_tool *tool,
- union perf_event *raw_event __used, void *data,
- int cpu, u64 timestamp, struct thread *thread)
-{
- struct perf_kmem *kmem = container_of(tool, struct perf_kmem, tool);
- struct event_format *event;
- int type;
-
- type = trace_parse_common_type(kmem->session->pevent, data);
- event = pevent_find_event(kmem->session->pevent, type);
-
- if (!strcmp(event->name, "kmalloc") ||
- !strcmp(event->name, "kmem_cache_alloc")) {
- process_alloc_event(data, event, cpu, timestamp, thread, 0);
- return;
- }
-
- if (!strcmp(event->name, "kmalloc_node") ||
- !strcmp(event->name, "kmem_cache_alloc_node")) {
- process_alloc_event(data, event, cpu, timestamp, thread, 1);
- return;
- }
-
- if (!strcmp(event->name, "kfree") ||
- !strcmp(event->name, "kmem_cache_free")) {
- process_free_event(data, event, cpu, timestamp, thread);
- return;
- }
+ return 0;
}
-static int process_sample_event(struct perf_tool *tool,
+typedef int (*tracepoint_handler)(struct perf_evsel *evsel,
+ struct perf_sample *sample);
+
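+/*
+ * Dispatch to whatever handler perf_session__set_tracepoints_handlers()
+ * attached to this tracepoint's evsel, instead of string-matching the
+ * event name for every sample.
+ */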
+static int process_sample_event(struct perf_tool *tool __maybe_unused,
union perf_event *event,
struct perf_sample *sample,
- struct perf_evsel *evsel __used,
+ struct perf_evsel *evsel,
struct machine *machine)
{
struct thread *thread = machine__findnew_thread(machine, event->ip.pid);
@@ -329,18 +318,18 @@ static int process_sample_event(struct perf_tool *tool,
dump_printf(" ... thread: %s:%d\n", thread->comm, thread->pid);
- process_raw_event(tool, event, sample->raw_data, sample->cpu,
- sample->time, thread);
+ if (evsel->handler.func != NULL) {
+ tracepoint_handler f = evsel->handler.func;
+ return f(evsel, sample);
+ }
return 0;
}
-static struct perf_kmem perf_kmem = {
- .tool = {
- .sample = process_sample_event,
- .comm = perf_event__process_comm,
- .ordered_samples = true,
- },
+static struct perf_tool perf_kmem = {
+ .sample = process_sample_event,
+ .comm = perf_event__process_comm,
+ .ordered_samples = true,
};
static double fragmentation(unsigned long n_req, unsigned long n_alloc)
@@ -496,22 +485,32 @@ static int __cmd_kmem(void)
{
int err = -EINVAL;
struct perf_session *session;
-
- session = perf_session__new(input_name, O_RDONLY, 0, false,
- &perf_kmem.tool);
+ const struct perf_evsel_str_handler kmem_tracepoints[] = {
+ { "kmem:kmalloc", perf_evsel__process_alloc_event, },
+ { "kmem:kmem_cache_alloc", perf_evsel__process_alloc_event, },
+ { "kmem:kmalloc_node", perf_evsel__process_alloc_node_event, },
+ { "kmem:kmem_cache_alloc_node", perf_evsel__process_alloc_node_event, },
+ { "kmem:kfree", perf_evsel__process_free_event, },
+ { "kmem:kmem_cache_free", perf_evsel__process_free_event, },
+ };
+
+ session = perf_session__new(input_name, O_RDONLY, 0, false, &perf_kmem);
if (session == NULL)
return -ENOMEM;
- perf_kmem.session = session;
-
if (perf_session__create_kernel_maps(session) < 0)
goto out_delete;
if (!perf_session__has_traces(session, "kmem record"))
goto out_delete;
+ if (perf_session__set_tracepoints_handlers(session, kmem_tracepoints)) {
+ pr_err("Initializing perf session tracepoint handlers failed\n");
+ goto out_delete;
+ }
+
setup_pager();
- err = perf_session__process_events(session, &perf_kmem.tool);
+ err = perf_session__process_events(session, &perf_kmem);
if (err != 0)
goto out_delete;
sort_result();
@@ -635,8 +634,10 @@ static int sort_dimension__add(const char *tok, struct list_head *list)
for (i = 0; i < NUM_AVAIL_SORTS; i++) {
if (!strcmp(avail_sorts[i]->name, tok)) {
sort = malloc(sizeof(*sort));
- if (!sort)
- die("malloc");
+ if (!sort) {
+ pr_err("%s: malloc failed\n", __func__);
+ return -1;
+ }
memcpy(sort, avail_sorts[i], sizeof(*sort));
list_add_tail(&sort->list, list);
return 0;
@@ -651,8 +652,10 @@ static int setup_sorting(struct list_head *sort_list, const char *arg)
char *tok;
char *str = strdup(arg);
- if (!str)
- die("strdup");
+ if (!str) {
+ pr_err("%s: strdup failed\n", __func__);
+ return -1;
+ }
while (true) {
tok = strsep(&str, ",");
@@ -669,8 +672,8 @@ static int setup_sorting(struct list_head *sort_list, const char *arg)
return 0;
}
-static int parse_sort_opt(const struct option *opt __used,
- const char *arg, int unset __used)
+static int parse_sort_opt(const struct option *opt __maybe_unused,
+ const char *arg, int unset __maybe_unused)
{
if (!arg)
return -1;
@@ -683,22 +686,24 @@ static int parse_sort_opt(const struct option *opt __used,
return 0;
}
-static int parse_caller_opt(const struct option *opt __used,
- const char *arg __used, int unset __used)
+static int parse_caller_opt(const struct option *opt __maybe_unused,
+ const char *arg __maybe_unused,
+ int unset __maybe_unused)
{
caller_flag = (alloc_flag + 1);
return 0;
}
-static int parse_alloc_opt(const struct option *opt __used,
- const char *arg __used, int unset __used)
+static int parse_alloc_opt(const struct option *opt __maybe_unused,
+ const char *arg __maybe_unused,
+ int unset __maybe_unused)
{
alloc_flag = (caller_flag + 1);
return 0;
}
-static int parse_line_opt(const struct option *opt __used,
- const char *arg, int unset __used)
+static int parse_line_opt(const struct option *opt __maybe_unused,
+ const char *arg, int unset __maybe_unused)
{
int lines;
@@ -768,7 +773,7 @@ static int __cmd_record(int argc, const char **argv)
return cmd_record(i, rec_argv, NULL);
}
-int cmd_kmem(int argc, const char **argv, const char *prefix __used)
+int cmd_kmem(int argc, const char **argv, const char *prefix __maybe_unused)
{
argc = parse_options(argc, argv, kmem_options, kmem_usage, 0);
@@ -780,7 +785,8 @@ int cmd_kmem(int argc, const char **argv, const char *prefix __used)
if (!strncmp(argv[0], "rec", 3)) {
return __cmd_record(argc, argv);
} else if (!strcmp(argv[0], "stat")) {
- setup_cpunode_map();
+ if (setup_cpunode_map())
+ return -1;
if (list_empty(&caller_sort))
setup_sorting(&caller_sort, default_sort_order);
diff --git a/tools/perf/builtin-kvm.c b/tools/perf/builtin-kvm.c
index 9fc6e0fa3dc..a28c9cad904 100644
--- a/tools/perf/builtin-kvm.c
+++ b/tools/perf/builtin-kvm.c
@@ -1,6 +1,7 @@
#include "builtin.h"
#include "perf.h"
+#include "util/evsel.h"
#include "util/util.h"
#include "util/cache.h"
#include "util/symbol.h"
@@ -10,8 +11,10 @@
#include "util/parse-options.h"
#include "util/trace-event.h"
-
#include "util/debug.h"
+#include "util/debugfs.h"
+#include "util/tool.h"
+#include "util/stat.h"
#include <sys/prctl.h>
@@ -19,11 +22,836 @@
#include <pthread.h>
#include <math.h>
-static const char *file_name;
+#include "../../arch/x86/include/asm/svm.h"
+#include "../../arch/x86/include/asm/vmx.h"
+#include "../../arch/x86/include/asm/kvm.h"
+
+struct event_key {
+ #define INVALID_KEY (~0ULL)
+ u64 key;
+ int info;
+};
+
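+/*
+ * One measured interval runs from a 'begin' tracepoint to the matching
+ * 'end' tracepoint on the same VCPU; decode_key() renders the opaque
+ * hash key as a human-readable name for the report.
+ */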
+struct kvm_events_ops {
+ bool (*is_begin_event)(struct perf_evsel *evsel,
+ struct perf_sample *sample,
+ struct event_key *key);
+ bool (*is_end_event)(struct perf_evsel *evsel,
+ struct perf_sample *sample, struct event_key *key);
+ void (*decode_key)(struct event_key *key, char decode[20]);
+ const char *name;
+};
+
+static void exit_event_get_key(struct perf_evsel *evsel,
+ struct perf_sample *sample,
+ struct event_key *key)
+{
+ key->info = 0;
+ key->key = perf_evsel__intval(evsel, sample, "exit_reason");
+}
+
+static bool kvm_exit_event(struct perf_evsel *evsel)
+{
+ return !strcmp(evsel->name, "kvm:kvm_exit");
+}
+
+static bool exit_event_begin(struct perf_evsel *evsel,
+ struct perf_sample *sample, struct event_key *key)
+{
+ if (kvm_exit_event(evsel)) {
+ exit_event_get_key(evsel, sample, key);
+ return true;
+ }
+
+ return false;
+}
+
+static bool kvm_entry_event(struct perf_evsel *evsel)
+{
+ return !strcmp(evsel->name, "kvm:kvm_entry");
+}
+
+static bool exit_event_end(struct perf_evsel *evsel,
+ struct perf_sample *sample __maybe_unused,
+ struct event_key *key __maybe_unused)
+{
+ return kvm_entry_event(evsel);
+}
+
+struct exit_reasons_table {
+ unsigned long exit_code;
+ const char *reason;
+};
+
+struct exit_reasons_table vmx_exit_reasons[] = {
+ VMX_EXIT_REASONS
+};
+
+struct exit_reasons_table svm_exit_reasons[] = {
+ SVM_EXIT_REASONS
+};
+
+static int cpu_isa;
+
+static const char *get_exit_reason(u64 exit_code)
+{
+ int table_size = ARRAY_SIZE(svm_exit_reasons);
+ struct exit_reasons_table *table = svm_exit_reasons;
+
+ if (cpu_isa == 1) {
+ table = vmx_exit_reasons;
+ table_size = ARRAY_SIZE(vmx_exit_reasons);
+ }
+
+ while (table_size--) {
+ if (table->exit_code == exit_code)
+ return table->reason;
+ table++;
+ }
+
+ pr_err("unknown kvm exit code:%lld on %s\n",
+ (unsigned long long)exit_code, cpu_isa ? "VMX" : "SVM");
+ return "UNKNOWN";
+}
+
+static void exit_event_decode_key(struct event_key *key, char decode[20])
+{
+ const char *exit_reason = get_exit_reason(key->key);
+
+ scnprintf(decode, 20, "%s", exit_reason);
+}
+
+static struct kvm_events_ops exit_events = {
+ .is_begin_event = exit_event_begin,
+ .is_end_event = exit_event_end,
+ .decode_key = exit_event_decode_key,
+ .name = "VM-EXIT"
+};
+
+ /*
+ * For the mmio events, we treat:
+ * the time of MMIO write: kvm_mmio(KVM_TRACE_MMIO_WRITE...) -> kvm_entry
+ * the time of MMIO read: kvm_exit -> kvm_mmio(KVM_TRACE_MMIO_READ...).
+ */
+static void mmio_event_get_key(struct perf_evsel *evsel, struct perf_sample *sample,
+ struct event_key *key)
+{
+ key->key = perf_evsel__intval(evsel, sample, "gpa");
+ key->info = perf_evsel__intval(evsel, sample, "type");
+}
+
+#define KVM_TRACE_MMIO_READ_UNSATISFIED 0
+#define KVM_TRACE_MMIO_READ 1
+#define KVM_TRACE_MMIO_WRITE 2
+
+static bool mmio_event_begin(struct perf_evsel *evsel,
+ struct perf_sample *sample, struct event_key *key)
+{
+ /* MMIO read begin event in kernel. */
+ if (kvm_exit_event(evsel))
+ return true;
+
+ /* MMIO write begin event in kernel. */
+ if (!strcmp(evsel->name, "kvm:kvm_mmio") &&
+ perf_evsel__intval(evsel, sample, "type") == KVM_TRACE_MMIO_WRITE) {
+ mmio_event_get_key(evsel, sample, key);
+ return true;
+ }
+
+ return false;
+}
+
+static bool mmio_event_end(struct perf_evsel *evsel, struct perf_sample *sample,
+ struct event_key *key)
+{
+ /* MMIO write end event in kernel. */
+ if (kvm_entry_event(evsel))
+ return true;
+
+ /* MMIO read end event in kernel. */
+ if (!strcmp(evsel->name, "kvm:kvm_mmio") &&
+ perf_evsel__intval(evsel, sample, "type") == KVM_TRACE_MMIO_READ) {
+ mmio_event_get_key(evsel, sample, key);
+ return true;
+ }
+
+ return false;
+}
+
+static void mmio_event_decode_key(struct event_key *key, char decode[20])
+{
+ scnprintf(decode, 20, "%#lx:%s", (unsigned long)key->key,
+ key->info == KVM_TRACE_MMIO_WRITE ? "W" : "R");
+}
+
+static struct kvm_events_ops mmio_events = {
+ .is_begin_event = mmio_event_begin,
+ .is_end_event = mmio_event_end,
+ .decode_key = mmio_event_decode_key,
+ .name = "MMIO Access"
+};
+
+ /* The time of emulation pio access is from kvm_pio to kvm_entry. */
+static void ioport_event_get_key(struct perf_evsel *evsel,
+ struct perf_sample *sample,
+ struct event_key *key)
+{
+ key->key = perf_evsel__intval(evsel, sample, "port");
+ key->info = perf_evsel__intval(evsel, sample, "rw");
+}
+
+static bool ioport_event_begin(struct perf_evsel *evsel,
+ struct perf_sample *sample,
+ struct event_key *key)
+{
+ if (!strcmp(evsel->name, "kvm:kvm_pio")) {
+ ioport_event_get_key(evsel, sample, key);
+ return true;
+ }
+
+ return false;
+}
+
+static bool ioport_event_end(struct perf_evsel *evsel,
+ struct perf_sample *sample __maybe_unused,
+ struct event_key *key __maybe_unused)
+{
+ return kvm_entry_event(evsel);
+}
+
+static void ioport_event_decode_key(struct event_key *key, char decode[20])
+{
+ scnprintf(decode, 20, "%#llx:%s", (unsigned long long)key->key,
+ key->info ? "POUT" : "PIN");
+}
+
+static struct kvm_events_ops ioport_events = {
+ .is_begin_event = ioport_event_begin,
+ .is_end_event = ioport_event_end,
+ .decode_key = ioport_event_decode_key,
+ .name = "IO Port Access"
+};
+
+static const char *report_event = "vmexit";
+struct kvm_events_ops *events_ops;
+
+static bool register_kvm_events_ops(void)
+{
+ bool ret = true;
+
+ if (!strcmp(report_event, "vmexit"))
+ events_ops = &exit_events;
+ else if (!strcmp(report_event, "mmio"))
+ events_ops = &mmio_events;
+ else if (!strcmp(report_event, "ioport"))
+ events_ops = &ioport_events;
+ else {
+ pr_err("Unknown report event:%s\n", report_event);
+ ret = false;
+ }
+
+ return ret;
+}
+
+struct kvm_event_stats {
+ u64 time;
+ struct stats stats;
+};
+
+struct kvm_event {
+ struct list_head hash_entry;
+ struct rb_node rb;
+
+ struct event_key key;
+
+ struct kvm_event_stats total;
+
+ #define DEFAULT_VCPU_NUM 8
+ int max_vcpu;
+ struct kvm_event_stats *vcpu;
+};
+
+struct vcpu_event_record {
+ int vcpu_id;
+ u64 start_time;
+ struct kvm_event *last_event;
+};
+
+#define EVENTS_BITS 12
+#define EVENTS_CACHE_SIZE (1UL << EVENTS_BITS)
+
+static u64 total_time;
+static u64 total_count;
+static struct list_head kvm_events_cache[EVENTS_CACHE_SIZE];
+
+static void init_kvm_event_record(void)
+{
+ int i;
+
+ for (i = 0; i < (int)EVENTS_CACHE_SIZE; i++)
+ INIT_LIST_HEAD(&kvm_events_cache[i]);
+}
+
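+/* EVENTS_CACHE_SIZE is a power of two, so this mask is key % EVENTS_CACHE_SIZE. */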
+static int kvm_events_hash_fn(u64 key)
+{
+ return key & (EVENTS_CACHE_SIZE - 1);
+}
+
+static bool kvm_event_expand(struct kvm_event *event, int vcpu_id)
+{
+ int old_max_vcpu = event->max_vcpu;
+ void *prev;
+
+ if (vcpu_id < event->max_vcpu)
+ return true;
+
+ while (event->max_vcpu <= vcpu_id)
+ event->max_vcpu += DEFAULT_VCPU_NUM;
+
+ /* keep the old array so it can be freed if realloc() fails */
+ prev = event->vcpu;
+ event->vcpu = realloc(event->vcpu,
+ event->max_vcpu * sizeof(*event->vcpu));
+ if (!event->vcpu) {
+ free(prev);
+ pr_err("Not enough memory\n");
+ return false;
+ }
+
+ memset(event->vcpu + old_max_vcpu, 0,
+ (event->max_vcpu - old_max_vcpu) * sizeof(*event->vcpu));
+ return true;
+}
+
+static struct kvm_event *kvm_alloc_init_event(struct event_key *key)
+{
+ struct kvm_event *event;
+
+ event = zalloc(sizeof(*event));
+ if (!event) {
+ pr_err("Not enough memory\n");
+ return NULL;
+ }
+
+ event->key = *key;
+ return event;
+}
+
+static struct kvm_event *find_create_kvm_event(struct event_key *key)
+{
+ struct kvm_event *event;
+ struct list_head *head;
+
+ BUG_ON(key->key == INVALID_KEY);
+
+ head = &kvm_events_cache[kvm_events_hash_fn(key->key)];
+ list_for_each_entry(event, head, hash_entry)
+ if (event->key.key == key->key && event->key.info == key->info)
+ return event;
+
+ event = kvm_alloc_init_event(key);
+ if (!event)
+ return NULL;
+
+ list_add(&event->hash_entry, head);
+ return event;
+}
+
+static bool handle_begin_event(struct vcpu_event_record *vcpu_record,
+ struct event_key *key, u64 timestamp)
+{
+ struct kvm_event *event = NULL;
+
+ if (key->key != INVALID_KEY)
+ event = find_create_kvm_event(key);
+
+ vcpu_record->last_event = event;
+ vcpu_record->start_time = timestamp;
+ return true;
+}
+
+static void
+kvm_update_event_stats(struct kvm_event_stats *kvm_stats, u64 time_diff)
+{
+ kvm_stats->time += time_diff;
+ update_stats(&kvm_stats->stats, time_diff);
+}
+
+static double kvm_event_rel_stddev(int vcpu_id, struct kvm_event *event)
+{
+ struct kvm_event_stats *kvm_stats = &event->total;
+
+ if (vcpu_id != -1)
+ kvm_stats = &event->vcpu[vcpu_id];
+
+ return rel_stddev_stats(stddev_stats(&kvm_stats->stats),
+ avg_stats(&kvm_stats->stats));
+}
+
+static bool update_kvm_event(struct kvm_event *event, int vcpu_id,
+ u64 time_diff)
+{
+ kvm_update_event_stats(&event->total, time_diff);
+
+ if (!kvm_event_expand(event, vcpu_id))
+ return false;
+
+ kvm_update_event_stats(&event->vcpu[vcpu_id], time_diff);
+ return true;
+}
+
+static bool handle_end_event(struct vcpu_event_record *vcpu_record,
+ struct event_key *key, u64 timestamp)
+{
+ struct kvm_event *event;
+ u64 time_begin, time_diff;
+
+ event = vcpu_record->last_event;
+ time_begin = vcpu_record->start_time;
+
+ /* The begin event is not caught. */
+ if (!time_begin)
+ return true;
+
+ /*
+ * In some cases, the 'begin event' only records the start timestamp;
+ * the actual event is recognized in the 'end event' (e.g. mmio-event).
+ */
+
+ /* Both begin and end events did not get the key. */
+ if (!event && key->key == INVALID_KEY)
+ return true;
+
+ if (!event)
+ event = find_create_kvm_event(key);
+
+ if (!event)
+ return false;
+
+ vcpu_record->last_event = NULL;
+ vcpu_record->start_time = 0;
+
+ BUG_ON(timestamp < time_begin);
+
+ time_diff = timestamp - time_begin;
+ return update_kvm_event(event, vcpu_record->vcpu_id, time_diff);
+}
+
+static
+struct vcpu_event_record *per_vcpu_record(struct thread *thread,
+ struct perf_evsel *evsel,
+ struct perf_sample *sample)
+{
+ /* Only kvm_entry records vcpu id. */
+ if (!thread->priv && kvm_entry_event(evsel)) {
+ struct vcpu_event_record *vcpu_record;
+
+ vcpu_record = zalloc(sizeof(*vcpu_record));
+ if (!vcpu_record) {
+ pr_err("%s: Not enough memory\n", __func__);
+ return NULL;
+ }
+
+ vcpu_record->vcpu_id = perf_evsel__intval(evsel, sample, "vcpu_id");
+ thread->priv = vcpu_record;
+ }
+
+ return thread->priv;
+}
+
+static bool handle_kvm_event(struct thread *thread, struct perf_evsel *evsel,
+ struct perf_sample *sample)
+{
+ struct vcpu_event_record *vcpu_record;
+ struct event_key key = {.key = INVALID_KEY};
+
+ vcpu_record = per_vcpu_record(thread, evsel, sample);
+ if (!vcpu_record)
+ return true;
+
+ if (events_ops->is_begin_event(evsel, sample, &key))
+ return handle_begin_event(vcpu_record, &key, sample->time);
+
+ if (events_ops->is_end_event(evsel, sample, &key))
+ return handle_end_event(vcpu_record, &key, sample->time);
+
+ return true;
+}
+
+typedef int (*key_cmp_fun)(struct kvm_event*, struct kvm_event*, int);
+struct kvm_event_key {
+ const char *name;
+ key_cmp_fun key;
+};
+
+static int trace_vcpu = -1;
+#define GET_EVENT_KEY(func, field) \
+static u64 get_event_ ##func(struct kvm_event *event, int vcpu) \
+{ \
+ if (vcpu == -1) \
+ return event->total.field; \
+ \
+ if (vcpu >= event->max_vcpu) \
+ return 0; \
+ \
+ return event->vcpu[vcpu].field; \
+}
+
+#define COMPARE_EVENT_KEY(func, field) \
+GET_EVENT_KEY(func, field) \
+static int compare_kvm_event_ ## func(struct kvm_event *one, \
+ struct kvm_event *two, int vcpu)\
+{ \
+ return get_event_ ##func(one, vcpu) > \
+ get_event_ ##func(two, vcpu); \
+}
+
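+/*
+ * Expand the accessors and comparators: GET_EVENT_KEY(time, time)
+ * defines get_event_time(); COMPARE_EVENT_KEY(count, stats.n) defines
+ * both get_event_count() and compare_kvm_event_count().
+ */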
+GET_EVENT_KEY(time, time);
+COMPARE_EVENT_KEY(count, stats.n);
+COMPARE_EVENT_KEY(mean, stats.mean);
+
+#define DEF_SORT_NAME_KEY(name, compare_key) \
+ { #name, compare_kvm_event_ ## compare_key }
+
+static struct kvm_event_key keys[] = {
+ DEF_SORT_NAME_KEY(sample, count),
+ DEF_SORT_NAME_KEY(time, mean),
+ { NULL, NULL }
+};
+
+static const char *sort_key = "sample";
+static key_cmp_fun compare;
+
+static bool select_key(void)
+{
+ int i;
+
+ for (i = 0; keys[i].name; i++) {
+ if (!strcmp(keys[i].name, sort_key)) {
+ compare = keys[i].key;
+ return true;
+ }
+ }
+
+ pr_err("Unknown compare key:%s\n", sort_key);
+ return false;
+}
+
+static struct rb_root result;
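+/*
+ * Ordered insert: nodes for which bigger() holds descend to the left,
+ * so rb_first() is the largest entry and pop_from_result() yields
+ * events in descending order.
+ */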
+static void insert_to_result(struct kvm_event *event, key_cmp_fun bigger,
+ int vcpu)
+{
+ struct rb_node **rb = &result.rb_node;
+ struct rb_node *parent = NULL;
+ struct kvm_event *p;
+
+ while (*rb) {
+ p = container_of(*rb, struct kvm_event, rb);
+ parent = *rb;
+
+ if (bigger(event, p, vcpu))
+ rb = &(*rb)->rb_left;
+ else
+ rb = &(*rb)->rb_right;
+ }
+
+ rb_link_node(&event->rb, parent, rb);
+ rb_insert_color(&event->rb, &result);
+}
+
+static void update_total_count(struct kvm_event *event, int vcpu)
+{
+ total_count += get_event_count(event, vcpu);
+ total_time += get_event_time(event, vcpu);
+}
+
+static bool event_is_valid(struct kvm_event *event, int vcpu)
+{
+ return !!get_event_count(event, vcpu);
+}
+
+static void sort_result(int vcpu)
+{
+ unsigned int i;
+ struct kvm_event *event;
+
+ for (i = 0; i < EVENTS_CACHE_SIZE; i++)
+ list_for_each_entry(event, &kvm_events_cache[i], hash_entry)
+ if (event_is_valid(event, vcpu)) {
+ update_total_count(event, vcpu);
+ insert_to_result(event, compare, vcpu);
+ }
+}
+
+/* return the leftmost element of result and erase it */
+static struct kvm_event *pop_from_result(void)
+{
+ struct rb_node *node = rb_first(&result);
+
+ if (!node)
+ return NULL;
+
+ rb_erase(node, &result);
+ return container_of(node, struct kvm_event, rb);
+}
+
+static void print_vcpu_info(int vcpu)
+{
+ pr_info("Analyze events for ");
+
+ if (vcpu == -1)
+ pr_info("all VCPUs:\n\n");
+ else
+ pr_info("VCPU %d:\n\n", vcpu);
+}
+
+static void print_result(int vcpu)
+{
+ char decode[20];
+ struct kvm_event *event;
+
+ pr_info("\n\n");
+ print_vcpu_info(vcpu);
+ pr_info("%20s ", events_ops->name);
+ pr_info("%10s ", "Samples");
+ pr_info("%9s ", "Samples%");
+
+ pr_info("%9s ", "Time%");
+ pr_info("%16s ", "Avg time");
+ pr_info("\n\n");
+
+ while ((event = pop_from_result())) {
+ u64 ecount, etime;
+
+ ecount = get_event_count(event, vcpu);
+ etime = get_event_time(event, vcpu);
+
+ events_ops->decode_key(&event->key, decode);
+ pr_info("%20s ", decode);
+ pr_info("%10llu ", (unsigned long long)ecount);
+ pr_info("%8.2f%% ", (double)ecount / total_count * 100);
+ pr_info("%8.2f%% ", (double)etime / total_time * 100);
+ pr_info("%9.2fus ( +-%7.2f%% )", (double)etime / ecount/1e3,
+ kvm_event_rel_stddev(vcpu, event));
+ pr_info("\n");
+ }
+
+ pr_info("\nTotal Samples:%lld, Total events handled time:%.2fus.\n\n",
+ (unsigned long long)total_count, total_time / 1e3);
+}
+
+static int process_sample_event(struct perf_tool *tool __maybe_unused,
+ union perf_event *event,
+ struct perf_sample *sample,
+ struct perf_evsel *evsel,
+ struct machine *machine)
+{
+ struct thread *thread = machine__findnew_thread(machine, sample->tid);
+
+ if (thread == NULL) {
+ pr_debug("problem processing %d event, skipping it.\n",
+ event->header.type);
+ return -1;
+ }
+
+ if (!handle_kvm_event(thread, evsel, sample))
+ return -1;
+
+ return 0;
+}
+
+static struct perf_tool eops = {
+ .sample = process_sample_event,
+ .comm = perf_event__process_comm,
+ .ordered_samples = true,
+};
+
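+/*
+ * Choose VMX or SVM exit-reason decoding based on the CPU vendor
+ * string that perf record stored in the perf.data header.
+ */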
+static int get_cpu_isa(struct perf_session *session)
+{
+ char *cpuid = session->header.env.cpuid;
+ int isa;
+
+ if (strstr(cpuid, "Intel"))
+ isa = 1;
+ else if (strstr(cpuid, "AMD"))
+ isa = 0;
+ else {
+ pr_err("CPU %s is not supported.\n", cpuid);
+ isa = -ENOTSUP;
+ }
+
+ return isa;
+}
+
+static const char *file_name;
+
+static int read_events(void)
+{
+ struct perf_session *kvm_session;
+ int ret;
+
+ kvm_session = perf_session__new(file_name, O_RDONLY, 0, false, &eops);
+ if (!kvm_session) {
+ pr_err("Initializing perf session failed\n");
+ return -EINVAL;
+ }
+
+ if (!perf_session__has_traces(kvm_session, "kvm record"))
+ return -EINVAL;
+
+ /*
+ * Do not use the 'isa' field recorded by the kvm_exit tracepoint,
+ * since older kernels do not trace it.
+ */
+ ret = get_cpu_isa(kvm_session);
+
+ if (ret < 0)
+ return ret;
+
+ cpu_isa = ret;
+
+ return perf_session__process_events(kvm_session, &eops);
+}
+
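+/* vcpu == -1 means "all VCPUs"; any other value must be a valid id >= 0. */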
+static bool verify_vcpu(int vcpu)
+{
+ if (vcpu != -1 && vcpu < 0) {
+ pr_err("Invalid vcpu:%d.\n", vcpu);
+ return false;
+ }
+
+ return true;
+}
+
+static int kvm_events_report_vcpu(int vcpu)
+{
+ int ret = -EINVAL;
+
+ if (!verify_vcpu(vcpu))
+ goto exit;
+
+ if (!select_key())
+ goto exit;
+
+ if (!register_kvm_events_ops())
+ goto exit;
+
+ init_kvm_event_record();
+ setup_pager();
+
+ ret = read_events();
+ if (ret)
+ goto exit;
+
+ sort_result(vcpu);
+ print_result(vcpu);
+exit:
+ return ret;
+}
+
+static const char * const record_args[] = {
+ "record",
+ "-R",
+ "-f",
+ "-m", "1024",
+ "-c", "1",
+ "-e", "kvm:kvm_entry",
+ "-e", "kvm:kvm_exit",
+ "-e", "kvm:kvm_mmio",
+ "-e", "kvm:kvm_pio",
+};
+
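+/*
+ * Statement-expression macro: evaluates to the duplicated string but
+ * returns -ENOMEM from the enclosing function when strdup() fails, so
+ * it is only usable inside functions returning int (or compatible).
+ */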
+#define STRDUP_FAIL_EXIT(s) \
+ ({ char *_p; \
+ _p = strdup(s); \
+ if (!_p) \
+ return -ENOMEM; \
+ _p; \
+ })
+
+static int kvm_events_record(int argc, const char **argv)
+{
+ unsigned int rec_argc, i, j;
+ const char **rec_argv;
+
+ rec_argc = ARRAY_SIZE(record_args) + argc + 2;
+ rec_argv = calloc(rec_argc + 1, sizeof(char *));
+
+ if (rec_argv == NULL)
+ return -ENOMEM;
+
+ for (i = 0; i < ARRAY_SIZE(record_args); i++)
+ rec_argv[i] = STRDUP_FAIL_EXIT(record_args[i]);
+
+ rec_argv[i++] = STRDUP_FAIL_EXIT("-o");
+ rec_argv[i++] = STRDUP_FAIL_EXIT(file_name);
+
+ for (j = 1; j < (unsigned int)argc; j++, i++)
+ rec_argv[i] = argv[j];
+
+ return cmd_record(i, rec_argv, NULL);
+}
+
+static const char * const kvm_events_report_usage[] = {
+ "perf kvm stat report [<options>]",
+ NULL
+};
+
+static const struct option kvm_events_report_options[] = {
+ OPT_STRING(0, "event", &report_event, "report event",
+ "event for reporting: vmexit, mmio, ioport"),
+ OPT_INTEGER(0, "vcpu", &trace_vcpu,
+ "vcpu id to report"),
+ OPT_STRING('k', "key", &sort_key, "sort-key",
+ "key for sorting: sample(sort by samples number)"
+ " time (sort by avg time)"),
+ OPT_END()
+};
+
+static int kvm_events_report(int argc, const char **argv)
+{
+ symbol__init();
+
+ if (argc) {
+ argc = parse_options(argc, argv,
+ kvm_events_report_options,
+ kvm_events_report_usage, 0);
+ if (argc)
+ usage_with_options(kvm_events_report_usage,
+ kvm_events_report_options);
+ }
+
+ return kvm_events_report_vcpu(trace_vcpu);
+}
+
+static void print_kvm_stat_usage(void)
+{
+ printf("Usage: perf kvm stat <command>\n\n");
+
+ printf("# Available commands:\n");
+ printf("\trecord: record kvm events\n");
+ printf("\treport: report statistical data of kvm events\n");
+
+ printf("\nOtherwise, it is the alias of 'perf stat':\n");
+}
+
+static int kvm_cmd_stat(int argc, const char **argv)
+{
+ if (argc == 1) {
+ print_kvm_stat_usage();
+ goto perf_stat;
+ }
+
+ if (!strncmp(argv[1], "rec", 3))
+ return kvm_events_record(argc - 1, argv + 1);
+
+ if (!strncmp(argv[1], "rep", 3))
+ return kvm_events_report(argc - 1, argv + 1);
+
+perf_stat:
+ return cmd_stat(argc, argv, NULL);
+}
+
static char name_buffer[256];
static const char * const kvm_usage[] = {
- "perf kvm [<options>] {top|record|report|diff|buildid-list}",
+ "perf kvm [<options>] {top|record|report|diff|buildid-list|stat}",
NULL
};
@@ -102,7 +930,7 @@ static int __cmd_buildid_list(int argc, const char **argv)
return cmd_buildid_list(i, rec_argv, NULL);
}
-int cmd_kvm(int argc, const char **argv, const char *prefix __used)
+int cmd_kvm(int argc, const char **argv, const char *prefix __maybe_unused)
{
perf_host = 0;
perf_guest = 1;
@@ -135,6 +963,8 @@ int cmd_kvm(int argc, const char **argv, const char *prefix __used)
return cmd_top(argc, argv, NULL);
else if (!strncmp(argv[0], "buildid-list", 12))
return __cmd_buildid_list(argc, argv);
+ else if (!strncmp(argv[0], "stat", 4))
+ return kvm_cmd_stat(argc, argv);
else
usage_with_options(kvm_usage, kvm_options);
diff --git a/tools/perf/builtin-list.c b/tools/perf/builtin-list.c
index 6313b6eb3eb..1948eceb517 100644
--- a/tools/perf/builtin-list.c
+++ b/tools/perf/builtin-list.c
@@ -14,20 +14,20 @@
#include "util/parse-events.h"
#include "util/cache.h"
-int cmd_list(int argc, const char **argv, const char *prefix __used)
+int cmd_list(int argc, const char **argv, const char *prefix __maybe_unused)
{
setup_pager();
if (argc == 1)
- print_events(NULL);
+ print_events(NULL, false);
else {
int i;
for (i = 1; i < argc; ++i) {
- if (i > 1)
+ if (i > 2)
putchar('\n');
if (strncmp(argv[i], "tracepoint", 10) == 0)
- print_tracepoint_events(NULL, NULL);
+ print_tracepoint_events(NULL, NULL, false);
else if (strcmp(argv[i], "hw") == 0 ||
strcmp(argv[i], "hardware") == 0)
print_events_type(PERF_TYPE_HARDWARE);
@@ -36,13 +36,15 @@ int cmd_list(int argc, const char **argv, const char *prefix __used)
print_events_type(PERF_TYPE_SOFTWARE);
else if (strcmp(argv[i], "cache") == 0 ||
strcmp(argv[i], "hwcache") == 0)
- print_hwcache_events(NULL);
+ print_hwcache_events(NULL, false);
+ else if (strcmp(argv[i], "--raw-dump") == 0)
+ print_events(NULL, true);
else {
char *sep = strchr(argv[i], ':'), *s;
int sep_idx;
if (sep == NULL) {
- print_events(argv[i]);
+ print_events(argv[i], false);
continue;
}
sep_idx = sep - argv[i];
@@ -51,7 +53,7 @@ int cmd_list(int argc, const char **argv, const char *prefix __used)
return -1;
s[sep_idx] = '\0';
- print_tracepoint_events(s, s + sep_idx + 1);
+ print_tracepoint_events(s, s + sep_idx + 1, false);
free(s);
}
}
diff --git a/tools/perf/builtin-lock.c b/tools/perf/builtin-lock.c
index b3c42854886..7d6e0994988 100644
--- a/tools/perf/builtin-lock.c
+++ b/tools/perf/builtin-lock.c
@@ -1,6 +1,8 @@
#include "builtin.h"
#include "perf.h"
+#include "util/evlist.h"
+#include "util/evsel.h"
#include "util/util.h"
#include "util/cache.h"
#include "util/symbol.h"
@@ -40,7 +42,7 @@ struct lock_stat {
struct rb_node rb; /* used for sorting */
/*
- * FIXME: raw_field_value() returns unsigned long long,
+ * FIXME: perf_evsel__intval() returns u64,
* so the address of lockdep_map should be handled as 64-bit.
* Is there a better solution?
*/
@@ -160,8 +162,10 @@ static struct thread_stat *thread_stat_findnew_after_first(u32 tid)
return st;
st = zalloc(sizeof(struct thread_stat));
- if (!st)
- die("memory allocation failed\n");
+ if (!st) {
+ pr_err("memory allocation failed\n");
+ return NULL;
+ }
st->tid = tid;
INIT_LIST_HEAD(&st->seq_list);
@@ -180,8 +184,10 @@ static struct thread_stat *thread_stat_findnew_first(u32 tid)
struct thread_stat *st;
st = zalloc(sizeof(struct thread_stat));
- if (!st)
- die("memory allocation failed\n");
+ if (!st) {
+ pr_err("memory allocation failed\n");
+ return NULL;
+ }
st->tid = tid;
INIT_LIST_HEAD(&st->seq_list);
@@ -247,18 +253,20 @@ struct lock_key keys[] = {
{ NULL, NULL }
};
-static void select_key(void)
+static int select_key(void)
{
int i;
for (i = 0; keys[i].name; i++) {
if (!strcmp(keys[i].name, sort_key)) {
compare = keys[i].key;
- return;
+ return 0;
}
}
- die("Unknown compare key:%s\n", sort_key);
+ pr_err("Unknown compare key: %s\n", sort_key);
+
+ return -1;
}
static void insert_to_result(struct lock_stat *st,
@@ -323,61 +331,24 @@ static struct lock_stat *lock_stat_findnew(void *addr, const char *name)
return new;
alloc_failed:
- die("memory allocation failed\n");
+ pr_err("memory allocation failed\n");
+ return NULL;
}
static const char *input_name;
-struct raw_event_sample {
- u32 size;
- char data[0];
-};
-
-struct trace_acquire_event {
- void *addr;
- const char *name;
- int flag;
-};
-
-struct trace_acquired_event {
- void *addr;
- const char *name;
-};
+struct trace_lock_handler {
+ int (*acquire_event)(struct perf_evsel *evsel,
+ struct perf_sample *sample);
-struct trace_contended_event {
- void *addr;
- const char *name;
-};
+ int (*acquired_event)(struct perf_evsel *evsel,
+ struct perf_sample *sample);
-struct trace_release_event {
- void *addr;
- const char *name;
-};
+ int (*contended_event)(struct perf_evsel *evsel,
+ struct perf_sample *sample);
-struct trace_lock_handler {
- void (*acquire_event)(struct trace_acquire_event *,
- struct event_format *,
- int cpu,
- u64 timestamp,
- struct thread *thread);
-
- void (*acquired_event)(struct trace_acquired_event *,
- struct event_format *,
- int cpu,
- u64 timestamp,
- struct thread *thread);
-
- void (*contended_event)(struct trace_contended_event *,
- struct event_format *,
- int cpu,
- u64 timestamp,
- struct thread *thread);
-
- void (*release_event)(struct trace_release_event *,
- struct event_format *,
- int cpu,
- u64 timestamp,
- struct thread *thread);
+ int (*release_event)(struct perf_evsel *evsel,
+ struct perf_sample *sample);
};
static struct lock_seq_stat *get_seq(struct thread_stat *ts, void *addr)
@@ -390,8 +361,10 @@ static struct lock_seq_stat *get_seq(struct thread_stat *ts, void *addr)
}
seq = zalloc(sizeof(struct lock_seq_stat));
- if (!seq)
- die("Not enough memory\n");
+ if (!seq) {
+ pr_err("memory allocation failed\n");
+ return NULL;
+ }
seq->state = SEQ_STATE_UNINITIALIZED;
seq->addr = addr;
@@ -414,33 +387,42 @@ enum acquire_flags {
READ_LOCK = 2,
};
-static void
-report_lock_acquire_event(struct trace_acquire_event *acquire_event,
- struct event_format *__event __used,
- int cpu __used,
- u64 timestamp __used,
- struct thread *thread __used)
+static int report_lock_acquire_event(struct perf_evsel *evsel,
+ struct perf_sample *sample)
{
+ void *addr;
struct lock_stat *ls;
struct thread_stat *ts;
struct lock_seq_stat *seq;
+ const char *name = perf_evsel__strval(evsel, sample, "name");
+ u64 tmp = perf_evsel__intval(evsel, sample, "lockdep_addr");
+ int flag = perf_evsel__intval(evsel, sample, "flag");
+
+ memcpy(&addr, &tmp, sizeof(void *));
- ls = lock_stat_findnew(acquire_event->addr, acquire_event->name);
+ ls = lock_stat_findnew(addr, name);
+ if (!ls)
+ return -1;
if (ls->discard)
- return;
+ return 0;
- ts = thread_stat_findnew(thread->pid);
- seq = get_seq(ts, acquire_event->addr);
+ ts = thread_stat_findnew(sample->tid);
+ if (!ts)
+ return -1;
+
+ seq = get_seq(ts, addr);
+ if (!seq)
+ return -1;
switch (seq->state) {
case SEQ_STATE_UNINITIALIZED:
case SEQ_STATE_RELEASED:
- if (!acquire_event->flag) {
+ if (!flag) {
seq->state = SEQ_STATE_ACQUIRING;
} else {
- if (acquire_event->flag & TRY_LOCK)
+ if (flag & TRY_LOCK)
ls->nr_trylock++;
- if (acquire_event->flag & READ_LOCK)
+ if (flag & READ_LOCK)
ls->nr_readlock++;
seq->state = SEQ_STATE_READ_ACQUIRED;
seq->read_count = 1;
@@ -448,7 +430,7 @@ report_lock_acquire_event(struct trace_acquire_event *acquire_event,
}
break;
case SEQ_STATE_READ_ACQUIRED:
- if (acquire_event->flag & READ_LOCK) {
+ if (flag & READ_LOCK) {
seq->read_count++;
ls->nr_acquired++;
goto end;
@@ -473,38 +455,46 @@ broken:
}
ls->nr_acquire++;
- seq->prev_event_time = timestamp;
+ seq->prev_event_time = sample->time;
end:
- return;
+ return 0;
}
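The memcpy() through a u64 temporary above avoids casting a 64-bit trace field straight to a pointer, which would warn (and truncate incorrectly) on 32-bit builds. A standalone sketch of the pattern, with an illustrative helper name:

#include <stdint.h>
#include <string.h>

/* Copy only sizeof(void *) bytes of the raw field into the pointer.
 * The result is used purely as a lookup key in the lock hash, not
 * dereferenced, so it need not be a live address in this process. */
static void *lockdep_field_to_key(uint64_t raw)
{
	void *addr = NULL;

	memcpy(&addr, &raw, sizeof(void *));
	return addr;
}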
-static void
-report_lock_acquired_event(struct trace_acquired_event *acquired_event,
- struct event_format *__event __used,
- int cpu __used,
- u64 timestamp __used,
- struct thread *thread __used)
+static int report_lock_acquired_event(struct perf_evsel *evsel,
+ struct perf_sample *sample)
{
+ void *addr;
struct lock_stat *ls;
struct thread_stat *ts;
struct lock_seq_stat *seq;
u64 contended_term;
+ const char *name = perf_evsel__strval(evsel, sample, "name");
+ u64 tmp = perf_evsel__intval(evsel, sample, "lockdep_addr");
+
+ memcpy(&addr, &tmp, sizeof(void *));
- ls = lock_stat_findnew(acquired_event->addr, acquired_event->name);
+ ls = lock_stat_findnew(addr, name);
+ if (!ls)
+ return -1;
if (ls->discard)
- return;
+ return 0;
+
+ ts = thread_stat_findnew(sample->tid);
+ if (!ts)
+ return -1;
- ts = thread_stat_findnew(thread->pid);
- seq = get_seq(ts, acquired_event->addr);
+ seq = get_seq(ts, addr);
+ if (!seq)
+ return -1;
switch (seq->state) {
case SEQ_STATE_UNINITIALIZED:
/* orphan event, do nothing */
- return;
+ return 0;
case SEQ_STATE_ACQUIRING:
break;
case SEQ_STATE_CONTENDED:
- contended_term = timestamp - seq->prev_event_time;
+ contended_term = sample->time - seq->prev_event_time;
ls->wait_time_total += contended_term;
if (contended_term < ls->wait_time_min)
ls->wait_time_min = contended_term;
@@ -529,33 +519,41 @@ report_lock_acquired_event(struct trace_acquired_event *acquired_event,
seq->state = SEQ_STATE_ACQUIRED;
ls->nr_acquired++;
- seq->prev_event_time = timestamp;
+ seq->prev_event_time = sample->time;
end:
- return;
+ return 0;
}
-static void
-report_lock_contended_event(struct trace_contended_event *contended_event,
- struct event_format *__event __used,
- int cpu __used,
- u64 timestamp __used,
- struct thread *thread __used)
+static int report_lock_contended_event(struct perf_evsel *evsel,
+ struct perf_sample *sample)
{
+ void *addr;
struct lock_stat *ls;
struct thread_stat *ts;
struct lock_seq_stat *seq;
+ const char *name = perf_evsel__strval(evsel, sample, "name");
+ u64 tmp = perf_evsel__intval(evsel, sample, "lockdep_addr");
- ls = lock_stat_findnew(contended_event->addr, contended_event->name);
+ memcpy(&addr, &tmp, sizeof(void *));
+
+ ls = lock_stat_findnew(addr, name);
+ if (!ls)
+ return -1;
if (ls->discard)
- return;
+ return 0;
+
+ ts = thread_stat_findnew(sample->tid);
+ if (!ts)
+ return -1;
- ts = thread_stat_findnew(thread->pid);
- seq = get_seq(ts, contended_event->addr);
+ seq = get_seq(ts, addr);
+ if (!seq)
+ return -1;
switch (seq->state) {
case SEQ_STATE_UNINITIALIZED:
/* orphan event, do nothing */
- return;
+ return 0;
case SEQ_STATE_ACQUIRING:
break;
case SEQ_STATE_RELEASED:
@@ -576,28 +574,36 @@ report_lock_contended_event(struct trace_contended_event *contended_event,
seq->state = SEQ_STATE_CONTENDED;
ls->nr_contended++;
- seq->prev_event_time = timestamp;
+ seq->prev_event_time = sample->time;
end:
- return;
+ return 0;
}
-static void
-report_lock_release_event(struct trace_release_event *release_event,
- struct event_format *__event __used,
- int cpu __used,
- u64 timestamp __used,
- struct thread *thread __used)
+static int report_lock_release_event(struct perf_evsel *evsel,
+ struct perf_sample *sample)
{
+ void *addr;
struct lock_stat *ls;
struct thread_stat *ts;
struct lock_seq_stat *seq;
+ const char *name = perf_evsel__strval(evsel, sample, "name");
+ u64 tmp = perf_evsel__intval(evsel, sample, "lockdep_addr");
+
+ memcpy(&addr, &tmp, sizeof(void *));
- ls = lock_stat_findnew(release_event->addr, release_event->name);
+ ls = lock_stat_findnew(addr, name);
+ if (!ls)
+ return -1;
if (ls->discard)
- return;
+ return 0;
+
+ ts = thread_stat_findnew(sample->tid);
+ if (!ts)
+ return -1;
- ts = thread_stat_findnew(thread->pid);
- seq = get_seq(ts, release_event->addr);
+ seq = get_seq(ts, addr);
+ if (!seq)
+ return -1;
switch (seq->state) {
case SEQ_STATE_UNINITIALIZED:
@@ -631,7 +637,7 @@ free_seq:
list_del(&seq->list);
free(seq);
end:
- return;
+ return 0;
}
/* lock oriented handlers */
@@ -645,96 +651,36 @@ static struct trace_lock_handler report_lock_ops = {
static struct trace_lock_handler *trace_handler;
-static void
-process_lock_acquire_event(void *data,
- struct event_format *event __used,
- int cpu __used,
- u64 timestamp __used,
- struct thread *thread __used)
-{
- struct trace_acquire_event acquire_event;
- u64 tmp; /* this is required for casting... */
-
- tmp = raw_field_value(event, "lockdep_addr", data);
- memcpy(&acquire_event.addr, &tmp, sizeof(void *));
- acquire_event.name = (char *)raw_field_ptr(event, "name", data);
- acquire_event.flag = (int)raw_field_value(event, "flag", data);
-
- if (trace_handler->acquire_event)
- trace_handler->acquire_event(&acquire_event, event, cpu, timestamp, thread);
-}
-
-static void
-process_lock_acquired_event(void *data,
- struct event_format *event __used,
- int cpu __used,
- u64 timestamp __used,
- struct thread *thread __used)
+static int perf_evsel__process_lock_acquire(struct perf_evsel *evsel,
+ struct perf_sample *sample)
{
- struct trace_acquired_event acquired_event;
- u64 tmp; /* this is required for casting... */
-
- tmp = raw_field_value(event, "lockdep_addr", data);
- memcpy(&acquired_event.addr, &tmp, sizeof(void *));
- acquired_event.name = (char *)raw_field_ptr(event, "name", data);
-
if (trace_handler->acquire_event)
- trace_handler->acquired_event(&acquired_event, event, cpu, timestamp, thread);
+ return trace_handler->acquire_event(evsel, sample);
+ return 0;
}
-static void
-process_lock_contended_event(void *data,
- struct event_format *event __used,
- int cpu __used,
- u64 timestamp __used,
- struct thread *thread __used)
+static int perf_evsel__process_lock_acquired(struct perf_evsel *evsel,
+ struct perf_sample *sample)
{
- struct trace_contended_event contended_event;
- u64 tmp; /* this is required for casting... */
-
- tmp = raw_field_value(event, "lockdep_addr", data);
- memcpy(&contended_event.addr, &tmp, sizeof(void *));
- contended_event.name = (char *)raw_field_ptr(event, "name", data);
-
- if (trace_handler->acquire_event)
- trace_handler->contended_event(&contended_event, event, cpu, timestamp, thread);
+ if (trace_handler->acquired_event)
+ return trace_handler->acquired_event(evsel, sample);
+ return 0;
}
-static void
-process_lock_release_event(void *data,
- struct event_format *event __used,
- int cpu __used,
- u64 timestamp __used,
- struct thread *thread __used)
+static int perf_evsel__process_lock_contended(struct perf_evsel *evsel,
+ struct perf_sample *sample)
{
- struct trace_release_event release_event;
- u64 tmp; /* this is required for casting... */
-
- tmp = raw_field_value(event, "lockdep_addr", data);
- memcpy(&release_event.addr, &tmp, sizeof(void *));
- release_event.name = (char *)raw_field_ptr(event, "name", data);
-
- if (trace_handler->acquire_event)
- trace_handler->release_event(&release_event, event, cpu, timestamp, thread);
+ if (trace_handler->contended_event)
+ return trace_handler->contended_event(evsel, sample);
+ return 0;
}
-static void
-process_raw_event(void *data, int cpu, u64 timestamp, struct thread *thread)
+static int perf_evsel__process_lock_release(struct perf_evsel *evsel,
+ struct perf_sample *sample)
{
- struct event_format *event;
- int type;
-
- type = trace_parse_common_type(session->pevent, data);
- event = pevent_find_event(session->pevent, type);
-
- if (!strcmp(event->name, "lock_acquire"))
- process_lock_acquire_event(data, event, cpu, timestamp, thread);
- if (!strcmp(event->name, "lock_acquired"))
- process_lock_acquired_event(data, event, cpu, timestamp, thread);
- if (!strcmp(event->name, "lock_contended"))
- process_lock_contended_event(data, event, cpu, timestamp, thread);
- if (!strcmp(event->name, "lock_release"))
- process_lock_release_event(data, event, cpu, timestamp, thread);
+ if (trace_handler->release_event)
+ return trace_handler->release_event(evsel, sample);
+ return 0;
}
static void print_bad_events(int bad, int total)
@@ -836,20 +782,29 @@ static void dump_map(void)
}
}
-static void dump_info(void)
+static int dump_info(void)
{
+ int rc = 0;
+
if (info_threads)
dump_threads();
else if (info_map)
dump_map();
- else
- die("Unknown type of information\n");
+ else {
+ rc = -1;
+ pr_err("Unknown type of information\n");
+ }
+
+ return rc;
}
-static int process_sample_event(struct perf_tool *tool __used,
+typedef int (*tracepoint_handler)(struct perf_evsel *evsel,
+ struct perf_sample *sample);
+
+static int process_sample_event(struct perf_tool *tool __maybe_unused,
union perf_event *event,
struct perf_sample *sample,
- struct perf_evsel *evsel __used,
+ struct perf_evsel *evsel,
struct machine *machine)
{
struct thread *thread = machine__findnew_thread(machine, sample->tid);
@@ -860,7 +815,10 @@ static int process_sample_event(struct perf_tool *tool __used,
return -1;
}
- process_raw_event(sample->raw_data, sample->cpu, sample->time, thread);
+ if (evsel->handler.func != NULL) {
+ tracepoint_handler f = evsel->handler.func;
+ return f(evsel, sample);
+ }
return 0;
}
@@ -871,11 +829,25 @@ static struct perf_tool eops = {
.ordered_samples = true,
};
+static const struct perf_evsel_str_handler lock_tracepoints[] = {
+ { "lock:lock_acquire", perf_evsel__process_lock_acquire, }, /* CONFIG_LOCKDEP */
+ { "lock:lock_acquired", perf_evsel__process_lock_acquired, }, /* CONFIG_LOCKDEP, CONFIG_LOCK_STAT */
+ { "lock:lock_contended", perf_evsel__process_lock_contended, }, /* CONFIG_LOCKDEP, CONFIG_LOCK_STAT */
+ { "lock:lock_release", perf_evsel__process_lock_release, }, /* CONFIG_LOCKDEP */
+};
+
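lock_tracepoints above pairs each tracepoint name with its handler; perf_session__set_tracepoints_handlers() (called from read_events() below) stores the matching function in each evsel's handler.func, and process_sample_event() then dispatches through that pointer. A simplified standalone sketch of the name-to-handler lookup (the struct is a stand-in, not the real perf_evsel_str_handler):

#include <stddef.h>
#include <string.h>

struct str_handler {
	const char *name;
	int (*handler)(void *evsel, void *sample);
};

/* Return the handler registered for an event name, or NULL if the
 * event has none and should simply be skipped. */
static int (*lookup_handler(const struct str_handler *tbl, size_t n,
			    const char *event))(void *, void *)
{
	size_t i;

	for (i = 0; i < n; i++)
		if (!strcmp(tbl[i].name, event))
			return tbl[i].handler;
	return NULL;
}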
static int read_events(void)
{
session = perf_session__new(input_name, O_RDONLY, 0, false, &eops);
- if (!session)
- die("Initializing perf session failed\n");
+ if (!session) {
+ pr_err("Initializing perf session failed\n");
+ return -1;
+ }
+
+ if (perf_session__set_tracepoints_handlers(session, lock_tracepoints)) {
+ pr_err("Initializing perf session tracepoint handlers failed\n");
+ return -1;
+ }
return perf_session__process_events(session, &eops);
}
@@ -892,13 +864,18 @@ static void sort_result(void)
}
}
-static void __cmd_report(void)
+static int __cmd_report(void)
{
setup_pager();
- select_key();
- read_events();
+
+ if ((select_key() != 0) ||
+ (read_events() != 0))
+ return -1;
+
sort_result();
print_result();
+
+ return 0;
}
static const char * const report_usage[] = {
@@ -944,10 +921,6 @@ static const char *record_args[] = {
"-f",
"-m", "1024",
"-c", "1",
- "-e", "lock:lock_acquire",
- "-e", "lock:lock_acquired",
- "-e", "lock:lock_contended",
- "-e", "lock:lock_release",
};
static int __cmd_record(int argc, const char **argv)
@@ -955,15 +928,31 @@ static int __cmd_record(int argc, const char **argv)
unsigned int rec_argc, i, j;
const char **rec_argv;
+ for (i = 0; i < ARRAY_SIZE(lock_tracepoints); i++) {
+ if (!is_valid_tracepoint(lock_tracepoints[i].name)) {
+ pr_err("tracepoint %s is not enabled. "
+ "Are CONFIG_LOCKDEP and CONFIG_LOCK_STAT enabled?\n",
+ lock_tracepoints[i].name);
+ return 1;
+ }
+ }
+
rec_argc = ARRAY_SIZE(record_args) + argc - 1;
- rec_argv = calloc(rec_argc + 1, sizeof(char *));
+ /* factor of 2 is for -e in front of each tracepoint */
+ rec_argc += 2 * ARRAY_SIZE(lock_tracepoints);
+ rec_argv = calloc(rec_argc + 1, sizeof(char *));
if (rec_argv == NULL)
return -ENOMEM;
for (i = 0; i < ARRAY_SIZE(record_args); i++)
rec_argv[i] = strdup(record_args[i]);
+ for (j = 0; j < ARRAY_SIZE(lock_tracepoints); j++) {
+ rec_argv[i++] = "-e";
+ rec_argv[i++] = strdup(lock_tracepoints[j].name);
+ }
+
for (j = 1; j < (unsigned int)argc; j++, i++)
rec_argv[i] = argv[j];
@@ -972,9 +961,10 @@ static int __cmd_record(int argc, const char **argv)
return cmd_record(i, rec_argv, NULL);
}
-int cmd_lock(int argc, const char **argv, const char *prefix __used)
+int cmd_lock(int argc, const char **argv, const char *prefix __maybe_unused)
{
unsigned int i;
+ int rc = 0;
symbol__init();
for (i = 0; i < LOCKHASH_SIZE; i++)
@@ -1009,11 +999,13 @@ int cmd_lock(int argc, const char **argv, const char *prefix __used)
/* recycling report_lock_ops */
trace_handler = &report_lock_ops;
setup_pager();
- read_events();
- dump_info();
+ if (read_events() != 0)
+ rc = -1;
+ else
+ rc = dump_info();
} else {
usage_with_options(lock_usage, lock_options);
}
- return 0;
+ return rc;
}
diff --git a/tools/perf/builtin-probe.c b/tools/perf/builtin-probe.c
index e215ae61b2a..118aa894657 100644
--- a/tools/perf/builtin-probe.c
+++ b/tools/perf/builtin-probe.c
@@ -143,8 +143,8 @@ static int parse_probe_event_argv(int argc, const char **argv)
return ret;
}
-static int opt_add_probe_event(const struct option *opt __used,
- const char *str, int unset __used)
+static int opt_add_probe_event(const struct option *opt __maybe_unused,
+ const char *str, int unset __maybe_unused)
{
if (str) {
params.mod_events = true;
@@ -153,8 +153,8 @@ static int opt_add_probe_event(const struct option *opt __used,
return 0;
}
-static int opt_del_probe_event(const struct option *opt __used,
- const char *str, int unset __used)
+static int opt_del_probe_event(const struct option *opt __maybe_unused,
+ const char *str, int unset __maybe_unused)
{
if (str) {
params.mod_events = true;
@@ -166,7 +166,7 @@ static int opt_del_probe_event(const struct option *opt __used,
}
static int opt_set_target(const struct option *opt, const char *str,
- int unset __used)
+ int unset __maybe_unused)
{
int ret = -ENOENT;
@@ -188,8 +188,8 @@ static int opt_set_target(const struct option *opt, const char *str,
}
#ifdef DWARF_SUPPORT
-static int opt_show_lines(const struct option *opt __used,
- const char *str, int unset __used)
+static int opt_show_lines(const struct option *opt __maybe_unused,
+ const char *str, int unset __maybe_unused)
{
int ret = 0;
@@ -209,8 +209,8 @@ static int opt_show_lines(const struct option *opt __used,
return ret;
}
-static int opt_show_vars(const struct option *opt __used,
- const char *str, int unset __used)
+static int opt_show_vars(const struct option *opt __maybe_unused,
+ const char *str, int unset __maybe_unused)
{
struct perf_probe_event *pev = &params.events[params.nevents];
int ret;
@@ -229,8 +229,8 @@ static int opt_show_vars(const struct option *opt __used,
}
#endif
-static int opt_set_filter(const struct option *opt __used,
- const char *str, int unset __used)
+static int opt_set_filter(const struct option *opt __maybe_unused,
+ const char *str, int unset __maybe_unused)
{
const char *err;
@@ -327,7 +327,7 @@ static const struct option options[] = {
OPT_END()
};
-int cmd_probe(int argc, const char **argv, const char *prefix __used)
+int cmd_probe(int argc, const char **argv, const char *prefix __maybe_unused)
{
int ret;
diff --git a/tools/perf/builtin-record.c b/tools/perf/builtin-record.c
index 4db6e1ba54e..f14cb5fdb91 100644
--- a/tools/perf/builtin-record.c
+++ b/tools/perf/builtin-record.c
@@ -31,6 +31,15 @@
#include <sched.h>
#include <sys/mman.h>
+#define CALLCHAIN_HELP "do call-graph (stack chain/backtrace) recording: "
+
+#ifdef NO_LIBUNWIND_SUPPORT
+static char callchain_help[] = CALLCHAIN_HELP "[fp]";
+#else
+static unsigned long default_stack_dump_size = 8192;
+static char callchain_help[] = CALLCHAIN_HELP "[fp] dwarf";
+#endif
+
enum write_mode_t {
WRITE_FORCE,
WRITE_APPEND
@@ -62,32 +71,38 @@ static void advance_output(struct perf_record *rec, size_t size)
rec->bytes_written += size;
}
-static void write_output(struct perf_record *rec, void *buf, size_t size)
+static int write_output(struct perf_record *rec, void *buf, size_t size)
{
while (size) {
int ret = write(rec->output, buf, size);
- if (ret < 0)
- die("failed to write");
+ if (ret < 0) {
+ pr_err("failed to write\n");
+ return -1;
+ }
size -= ret;
buf += ret;
rec->bytes_written += ret;
}
+
+ return 0;
}
static int process_synthesized_event(struct perf_tool *tool,
union perf_event *event,
- struct perf_sample *sample __used,
- struct machine *machine __used)
+ struct perf_sample *sample __maybe_unused,
+ struct machine *machine __maybe_unused)
{
struct perf_record *rec = container_of(tool, struct perf_record, tool);
- write_output(rec, event, event->header.size);
+ if (write_output(rec, event, event->header.size) < 0)
+ return -1;
+
return 0;
}
-static void perf_record__mmap_read(struct perf_record *rec,
+static int perf_record__mmap_read(struct perf_record *rec,
struct perf_mmap *md)
{
unsigned int head = perf_mmap__read_head(md);
@@ -95,9 +110,10 @@ static void perf_record__mmap_read(struct perf_record *rec,
unsigned char *data = md->base + rec->page_size;
unsigned long size;
void *buf;
+ int rc = 0;
if (old == head)
- return;
+ return 0;
rec->samples++;
@@ -108,17 +124,26 @@ static void perf_record__mmap_read(struct perf_record *rec,
size = md->mask + 1 - (old & md->mask);
old += size;
- write_output(rec, buf, size);
+ if (write_output(rec, buf, size) < 0) {
+ rc = -1;
+ goto out;
+ }
}
buf = &data[old & md->mask];
size = head - old;
old += size;
- write_output(rec, buf, size);
+ if (write_output(rec, buf, size) < 0) {
+ rc = -1;
+ goto out;
+ }
md->prev = old;
perf_mmap__write_tail(md, old);
+
+out:
+ return rc;
}
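perf_record__mmap_read() above drains the sampled bytes from a power-of-two mmap ring; when the span between the old tail and the new head crosses the end of the buffer, it is written out as two chunks. A condensed sketch of that wrap handling, assuming mask == buffer_size - 1 and a caller-supplied emit() standing in for write_output():

#include <stddef.h>

/* Emit the bytes in [old, head) from a ring of size mask + 1,
 * splitting the copy where the ring wraps. */
static int ring_copy(unsigned char *data, unsigned long mask,
		     unsigned long old, unsigned long head,
		     int (*emit)(void *buf, size_t size))
{
	size_t size = head - old;

	if ((old & mask) + size > mask + 1) {
		/* wrapped: flush from the tail position to the buffer end */
		size_t chunk = mask + 1 - (old & mask);

		if (emit(&data[old & mask], chunk) < 0)
			return -1;
		old += chunk;
	}
	return emit(&data[old & mask], head - old);
}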
static volatile int done = 0;
@@ -134,7 +159,7 @@ static void sig_handler(int sig)
signr = sig;
}
-static void perf_record__sig_exit(int exit_status __used, void *arg)
+static void perf_record__sig_exit(int exit_status __maybe_unused, void *arg)
{
struct perf_record *rec = arg;
int status;
@@ -163,31 +188,32 @@ static bool perf_evlist__equal(struct perf_evlist *evlist,
if (evlist->nr_entries != other->nr_entries)
return false;
- pair = list_entry(other->entries.next, struct perf_evsel, node);
+ pair = perf_evlist__first(other);
list_for_each_entry(pos, &evlist->entries, node) {
if (memcmp(&pos->attr, &pair->attr, sizeof(pos->attr)) != 0)
return false;
- pair = list_entry(pair->node.next, struct perf_evsel, node);
+ pair = perf_evsel__next(pair);
}
return true;
}
-static void perf_record__open(struct perf_record *rec)
+static int perf_record__open(struct perf_record *rec)
{
- struct perf_evsel *pos, *first;
+ struct perf_evsel *pos;
struct perf_evlist *evlist = rec->evlist;
struct perf_session *session = rec->session;
struct perf_record_opts *opts = &rec->opts;
-
- first = list_entry(evlist->entries.next, struct perf_evsel, node);
+ int rc = 0;
perf_evlist__config_attrs(evlist, opts);
+ if (opts->group)
+ perf_evlist__set_leader(evlist);
+
list_for_each_entry(pos, &evlist->entries, node) {
struct perf_event_attr *attr = &pos->attr;
- struct xyarray *group_fd = NULL;
/*
* Check if parse_single_tracepoint_event has already asked for
* PERF_SAMPLE_TIME.
@@ -202,24 +228,24 @@ static void perf_record__open(struct perf_record *rec)
*/
bool time_needed = attr->sample_type & PERF_SAMPLE_TIME;
- if (opts->group && pos != first)
- group_fd = first->fd;
fallback_missing_features:
if (opts->exclude_guest_missing)
attr->exclude_guest = attr->exclude_host = 0;
retry_sample_id:
attr->sample_id_all = opts->sample_id_all_missing ? 0 : 1;
try_again:
- if (perf_evsel__open(pos, evlist->cpus, evlist->threads,
- opts->group, group_fd) < 0) {
+ if (perf_evsel__open(pos, evlist->cpus, evlist->threads) < 0) {
int err = errno;
if (err == EPERM || err == EACCES) {
ui__error_paranoid();
- exit(EXIT_FAILURE);
+ rc = -err;
+ goto out;
} else if (err == ENODEV && opts->target.cpu_list) {
- die("No such device - did you specify"
- " an out-of-range profile CPU?\n");
+ pr_err("No such device - did you specify"
+ " an out-of-range profile CPU?\n");
+ rc = -err;
+ goto out;
} else if (err == EINVAL) {
if (!opts->exclude_guest_missing &&
(attr->exclude_guest || attr->exclude_host)) {
@@ -266,42 +292,57 @@ try_again:
if (err == ENOENT) {
ui__error("The %s event is not supported.\n",
perf_evsel__name(pos));
- exit(EXIT_FAILURE);
+ rc = -err;
+ goto out;
}
printf("\n");
- error("sys_perf_event_open() syscall returned with %d (%s). /bin/dmesg may provide additional information.\n",
- err, strerror(err));
+ error("sys_perf_event_open() syscall returned with %d "
+ "(%s) for event %s. /bin/dmesg may provide "
+ "additional information.\n",
+ err, strerror(err), perf_evsel__name(pos));
#if defined(__i386__) || defined(__x86_64__)
- if (attr->type == PERF_TYPE_HARDWARE && err == EOPNOTSUPP)
- die("No hardware sampling interrupt available."
- " No APIC? If so then you can boot the kernel"
- " with the \"lapic\" boot parameter to"
- " force-enable it.\n");
+ if (attr->type == PERF_TYPE_HARDWARE &&
+ err == EOPNOTSUPP) {
+ pr_err("No hardware sampling interrupt available."
+ " No APIC? If so then you can boot the kernel"
+ " with the \"lapic\" boot parameter to"
+ " force-enable it.\n");
+ rc = -err;
+ goto out;
+ }
#endif
- die("No CONFIG_PERF_EVENTS=y kernel support configured?\n");
+ pr_err("No CONFIG_PERF_EVENTS=y kernel support configured?\n");
+ rc = -err;
+ goto out;
}
}
- if (perf_evlist__set_filters(evlist)) {
+ if (perf_evlist__apply_filters(evlist)) {
error("failed to set filter with %d (%s)\n", errno,
strerror(errno));
- exit(-1);
+ rc = -1;
+ goto out;
}
if (perf_evlist__mmap(evlist, opts->mmap_pages, false) < 0) {
- if (errno == EPERM)
- die("Permission error mapping pages.\n"
- "Consider increasing "
- "/proc/sys/kernel/perf_event_mlock_kb,\n"
- "or try again with a smaller value of -m/--mmap_pages.\n"
- "(current value: %d)\n", opts->mmap_pages);
- else if (!is_power_of_2(opts->mmap_pages))
- die("--mmap_pages/-m value must be a power of two.");
-
- die("failed to mmap with %d (%s)\n", errno, strerror(errno));
+ if (errno == EPERM) {
+ pr_err("Permission error mapping pages.\n"
+ "Consider increasing "
+ "/proc/sys/kernel/perf_event_mlock_kb,\n"
+ "or try again with a smaller value of -m/--mmap_pages.\n"
+ "(current value: %d)\n", opts->mmap_pages);
+ rc = -errno;
+ } else if (!is_power_of_2(opts->mmap_pages)) {
+ pr_err("--mmap_pages/-m value must be a power of two.");
+ rc = -EINVAL;
+ } else {
+ pr_err("failed to mmap with %d (%s)\n", errno, strerror(errno));
+ rc = -errno;
+ }
+ goto out;
}
if (rec->file_new)
@@ -309,11 +350,14 @@ try_again:
else {
if (!perf_evlist__equal(session->evlist, evlist)) {
fprintf(stderr, "incompatible append\n");
- exit(-1);
+ rc = -1;
+ goto out;
}
}
perf_session__set_id_hdr_size(session);
+out:
+ return rc;
}
static int process_buildids(struct perf_record *rec)
@@ -329,10 +373,13 @@ static int process_buildids(struct perf_record *rec)
size, &build_id__mark_dso_hit_ops);
}
-static void perf_record__exit(int status __used, void *arg)
+static void perf_record__exit(int status, void *arg)
{
struct perf_record *rec = arg;
+ if (status != 0)
+ return;
+
if (!rec->opts.pipe_output) {
rec->session->header.data_size += rec->bytes_written;
@@ -387,17 +434,26 @@ static struct perf_event_header finished_round_event = {
.type = PERF_RECORD_FINISHED_ROUND,
};
-static void perf_record__mmap_read_all(struct perf_record *rec)
+static int perf_record__mmap_read_all(struct perf_record *rec)
{
int i;
+ int rc = 0;
for (i = 0; i < rec->evlist->nr_mmaps; i++) {
- if (rec->evlist->mmap[i].base)
- perf_record__mmap_read(rec, &rec->evlist->mmap[i]);
+ if (rec->evlist->mmap[i].base) {
+ if (perf_record__mmap_read(rec, &rec->evlist->mmap[i]) != 0) {
+ rc = -1;
+ goto out;
+ }
+ }
}
if (perf_header__has_feat(&rec->session->header, HEADER_TRACING_DATA))
- write_output(rec, &finished_round_event, sizeof(finished_round_event));
+ rc = write_output(rec, &finished_round_event,
+ sizeof(finished_round_event));
+
+out:
+ return rc;
}
static int __cmd_record(struct perf_record *rec, int argc, const char **argv)
@@ -457,7 +513,7 @@ static int __cmd_record(struct perf_record *rec, int argc, const char **argv)
output = open(output_name, flags, S_IRUSR | S_IWUSR);
if (output < 0) {
perror("failed to create output file");
- exit(-1);
+ return -1;
}
rec->output = output;
@@ -497,7 +553,10 @@ static int __cmd_record(struct perf_record *rec, int argc, const char **argv)
}
}
- perf_record__open(rec);
+ if (perf_record__open(rec) != 0) {
+ err = -1;
+ goto out_delete_session;
+ }
/*
* perf_session__delete(session) will be called at perf_record__exit()
@@ -507,19 +566,20 @@ static int __cmd_record(struct perf_record *rec, int argc, const char **argv)
if (opts->pipe_output) {
err = perf_header__write_pipe(output);
if (err < 0)
- return err;
+ goto out_delete_session;
} else if (rec->file_new) {
err = perf_session__write_header(session, evsel_list,
output, false);
if (err < 0)
- return err;
+ goto out_delete_session;
}
if (!rec->no_buildid
&& !perf_header__has_feat(&session->header, HEADER_BUILD_ID)) {
pr_err("Couldn't generate buildids. "
"Use --no-buildid to profile anyway.\n");
- return -1;
+ err = -1;
+ goto out_delete_session;
}
rec->post_processing_offset = lseek(output, 0, SEEK_CUR);
@@ -527,7 +587,8 @@ static int __cmd_record(struct perf_record *rec, int argc, const char **argv)
machine = perf_session__find_host_machine(session);
if (!machine) {
pr_err("Couldn't find native kernel information.\n");
- return -1;
+ err = -1;
+ goto out_delete_session;
}
if (opts->pipe_output) {
@@ -535,14 +596,14 @@ static int __cmd_record(struct perf_record *rec, int argc, const char **argv)
process_synthesized_event);
if (err < 0) {
pr_err("Couldn't synthesize attrs.\n");
- return err;
+ goto out_delete_session;
}
err = perf_event__synthesize_event_types(tool, process_synthesized_event,
machine);
if (err < 0) {
pr_err("Couldn't synthesize event_types.\n");
- return err;
+ goto out_delete_session;
}
if (have_tracepoints(&evsel_list->entries)) {
@@ -558,7 +619,7 @@ static int __cmd_record(struct perf_record *rec, int argc, const char **argv)
process_synthesized_event);
if (err <= 0) {
pr_err("Couldn't record tracing data.\n");
- return err;
+ goto out_delete_session;
}
advance_output(rec, err);
}
@@ -586,20 +647,24 @@ static int __cmd_record(struct perf_record *rec, int argc, const char **argv)
perf_event__synthesize_guest_os);
if (!opts->target.system_wide)
- perf_event__synthesize_thread_map(tool, evsel_list->threads,
+ err = perf_event__synthesize_thread_map(tool, evsel_list->threads,
process_synthesized_event,
machine);
else
- perf_event__synthesize_threads(tool, process_synthesized_event,
+ err = perf_event__synthesize_threads(tool, process_synthesized_event,
machine);
+ if (err != 0)
+ goto out_delete_session;
+
if (rec->realtime_prio) {
struct sched_param param;
param.sched_priority = rec->realtime_prio;
if (sched_setscheduler(0, SCHED_FIFO, &param)) {
pr_err("Could not set realtime priority.\n");
- exit(-1);
+ err = -1;
+ goto out_delete_session;
}
}
@@ -614,7 +679,10 @@ static int __cmd_record(struct perf_record *rec, int argc, const char **argv)
for (;;) {
int hits = rec->samples;
- perf_record__mmap_read_all(rec);
+ if (perf_record__mmap_read_all(rec) < 0) {
+ err = -1;
+ goto out_delete_session;
+ }
if (hits == rec->samples) {
if (done)
@@ -732,6 +800,106 @@ error:
return ret;
}
+#ifndef NO_LIBUNWIND_SUPPORT
+static int get_stack_size(char *str, unsigned long *_size)
+{
+ char *endptr;
+ unsigned long size;
+ unsigned long max_size = round_down(USHRT_MAX, sizeof(u64));
+
+ size = strtoul(str, &endptr, 0);
+
+ do {
+ if (*endptr)
+ break;
+
+ size = round_up(size, sizeof(u64));
+ if (!size || size > max_size)
+ break;
+
+ *_size = size;
+ return 0;
+
+ } while (0);
+
+ pr_err("callchain: Incorrect stack dump size (max %ld): %s\n",
+ max_size, str);
+ return -1;
+}
+#endif /* !NO_LIBUNWIND_SUPPORT */
+
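get_stack_size() above accepts a size in any base strtoul() understands, rounds it up to a multiple of sizeof(u64), and rejects zero or anything beyond round_down(USHRT_MAX, sizeof(u64)). A standalone check of those bounds, using local stand-ins for the kernel's round_up()/round_down() helpers:

#include <assert.h>
#include <stdint.h>

#define ROUND_UP(x, a)	 ((((x) + (a) - 1) / (a)) * (a))
#define ROUND_DOWN(x, a) (((x) / (a)) * (a))

int main(void)
{
	/* the cap printed in the error message: 65528 */
	assert(ROUND_DOWN(65535UL, sizeof(uint64_t)) == 65528UL);
	/* requested sizes are padded to the next u64 multiple */
	assert(ROUND_UP(8190UL, sizeof(uint64_t)) == 8192UL);
	return 0;
}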
+static int
+parse_callchain_opt(const struct option *opt, const char *arg,
+ int unset)
+{
+ struct perf_record *rec = (struct perf_record *)opt->value;
+ char *tok, *name, *saveptr = NULL;
+ char *buf;
+ int ret = -1;
+
+ /* --no-call-graph */
+ if (unset)
+ return 0;
+
+ /* A default option is supplied if none is provided, so arg is never NULL. */
+ BUG_ON(!arg);
+
+ /* We need a buffer that we know we can write to. */
+ buf = malloc(strlen(arg) + 1);
+ if (!buf)
+ return -ENOMEM;
+
+ strcpy(buf, arg);
+
+ tok = strtok_r((char *)buf, ",", &saveptr);
+ name = tok ? : (char *)buf;
+
+ do {
+ /* Framepointer style */
+ if (!strncmp(name, "fp", sizeof("fp"))) {
+ if (!strtok_r(NULL, ",", &saveptr)) {
+ rec->opts.call_graph = CALLCHAIN_FP;
+ ret = 0;
+ } else
+ pr_err("callchain: No more arguments "
+ "needed for -g fp\n");
+ break;
+
+#ifndef NO_LIBUNWIND_SUPPORT
+ /* Dwarf style */
+ } else if (!strncmp(name, "dwarf", sizeof("dwarf"))) {
+ ret = 0;
+ rec->opts.call_graph = CALLCHAIN_DWARF;
+ rec->opts.stack_dump_size = default_stack_dump_size;
+
+ tok = strtok_r(NULL, ",", &saveptr);
+ if (tok) {
+ unsigned long size = 0;
+
+ ret = get_stack_size(tok, &size);
+ rec->opts.stack_dump_size = size;
+ }
+
+ if (!ret)
+ pr_debug("callchain: stack dump size %d\n",
+ rec->opts.stack_dump_size);
+#endif /* !NO_LIBUNWIND_SUPPORT */
+ } else {
+ pr_err("callchain: Unknown -g option "
+ "value: %s\n", arg);
+ break;
+ }
+
+ } while (0);
+
+ free(buf);
+
+ if (!ret)
+ pr_debug("callchain: type %d\n", rec->opts.call_graph);
+
+ return ret;
+}
+
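parse_callchain_opt() above accepts -g fp, -g dwarf, or -g dwarf,<size>; because the option is declared with OPT_CALLBACK_DEFAULT and a default of "fp" further down, a bare -g selects frame-pointer unwinding. A standalone illustration of the strtok_r() split it performs on the writable copy:

#include <stdio.h>
#include <string.h>

int main(void)
{
	char buf[] = "dwarf,8192";	/* as given via -g dwarf,8192 */
	char *save = NULL;
	char *mode = strtok_r(buf, ",", &save);
	char *size = strtok_r(NULL, ",", &save);

	printf("mode=%s size=%s\n", mode, size ? size : "(default)");
	return 0;
}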
static const char * const record_usage[] = {
"perf record [<options>] [<command>]",
"perf record [<options>] -- <command> [<options>]",
@@ -803,8 +971,9 @@ const struct option record_options[] = {
"number of mmap data pages"),
OPT_BOOLEAN(0, "group", &record.opts.group,
"put the counters into a counter group"),
- OPT_BOOLEAN('g', "call-graph", &record.opts.call_graph,
- "do call-graph (stack chain/backtrace) recording"),
+ OPT_CALLBACK_DEFAULT('g', "call-graph", &record, "mode[,dump_size]",
+ callchain_help, &parse_callchain_opt,
+ "fp"),
OPT_INCR('v', "verbose", &verbose,
"be more verbose (show counter open errors, etc)"),
OPT_BOOLEAN('q', "quiet", &quiet, "don't print any message"),
@@ -836,7 +1005,7 @@ const struct option record_options[] = {
OPT_END()
};
-int cmd_record(int argc, const char **argv, const char *prefix __used)
+int cmd_record(int argc, const char **argv, const char *prefix __maybe_unused)
{
int err = -ENOMEM;
struct perf_evsel *pos;
diff --git a/tools/perf/builtin-report.c b/tools/perf/builtin-report.c
index 7c88a243b5d..1da243dfbc3 100644
--- a/tools/perf/builtin-report.c
+++ b/tools/perf/builtin-report.c
@@ -69,8 +69,8 @@ static int perf_report__add_branch_hist_entry(struct perf_tool *tool,
if ((sort__has_parent || symbol_conf.use_callchain)
&& sample->callchain) {
- err = machine__resolve_callchain(machine, al->thread,
- sample->callchain, &parent);
+ err = machine__resolve_callchain(machine, evsel, al->thread,
+ sample, &parent);
if (err)
return err;
}
@@ -93,7 +93,7 @@ static int perf_report__add_branch_hist_entry(struct perf_tool *tool,
struct annotation *notes;
err = -ENOMEM;
bx = he->branch_info;
- if (bx->from.sym && use_browser > 0) {
+ if (bx->from.sym && use_browser == 1 && sort__has_sym) {
notes = symbol__annotation(bx->from.sym);
if (!notes->src
&& symbol__alloc_hist(bx->from.sym) < 0)
@@ -107,7 +107,7 @@ static int perf_report__add_branch_hist_entry(struct perf_tool *tool,
goto out;
}
- if (bx->to.sym && use_browser > 0) {
+ if (bx->to.sym && use_browser == 1 && sort__has_sym) {
notes = symbol__annotation(bx->to.sym);
if (!notes->src
&& symbol__alloc_hist(bx->to.sym) < 0)
@@ -140,8 +140,8 @@ static int perf_evsel__add_hist_entry(struct perf_evsel *evsel,
struct hist_entry *he;
if ((sort__has_parent || symbol_conf.use_callchain) && sample->callchain) {
- err = machine__resolve_callchain(machine, al->thread,
- sample->callchain, &parent);
+ err = machine__resolve_callchain(machine, evsel, al->thread,
+ sample, &parent);
if (err)
return err;
}
@@ -162,7 +162,7 @@ static int perf_evsel__add_hist_entry(struct perf_evsel *evsel,
* so we don't allocated the extra space needed because the stdio
* code will not use it.
*/
- if (he->ms.sym != NULL && use_browser > 0) {
+ if (he->ms.sym != NULL && use_browser == 1 && sort__has_sym) {
struct annotation *notes = symbol__annotation(he->ms.sym);
assert(evsel != NULL);
@@ -223,9 +223,9 @@ static int process_sample_event(struct perf_tool *tool,
static int process_read_event(struct perf_tool *tool,
union perf_event *event,
- struct perf_sample *sample __used,
+ struct perf_sample *sample __maybe_unused,
struct perf_evsel *evsel,
- struct machine *machine __used)
+ struct machine *machine __maybe_unused)
{
struct perf_report *rep = container_of(tool, struct perf_report, tool);
@@ -287,7 +287,7 @@ static int perf_report__setup_sample_type(struct perf_report *rep)
extern volatile int session_done;
-static void sig_handler(int sig __used)
+static void sig_handler(int sig __maybe_unused)
{
session_done = 1;
}
@@ -397,17 +397,17 @@ static int __cmd_report(struct perf_report *rep)
desc);
}
- if (dump_trace) {
- perf_session__fprintf_nr_events(session, stdout);
- goto out_delete;
- }
-
if (verbose > 3)
perf_session__fprintf(session, stdout);
if (verbose > 2)
perf_session__fprintf_dsos(session, stdout);
+ if (dump_trace) {
+ perf_session__fprintf_nr_events(session, stdout);
+ goto out_delete;
+ }
+
nr_samples = 0;
list_for_each_entry(pos, &session->evlist->entries, node) {
struct hists *hists = &pos->hists;
@@ -533,13 +533,14 @@ setup:
}
static int
-parse_branch_mode(const struct option *opt __used, const char *str __used, int unset)
+parse_branch_mode(const struct option *opt __maybe_unused,
+ const char *str __maybe_unused, int unset)
{
sort__branch_mode = !unset;
return 0;
}
-int cmd_report(int argc, const char **argv, const char *prefix __used)
+int cmd_report(int argc, const char **argv, const char *prefix __maybe_unused)
{
struct perf_session *session;
struct stat st;
@@ -638,6 +639,8 @@ int cmd_report(int argc, const char **argv, const char *prefix __used)
"Show a column with the sum of periods"),
OPT_CALLBACK_NOOPT('b', "branch-stack", &sort__branch_mode, "",
"use branch records for histogram filling", parse_branch_mode),
+ OPT_STRING(0, "objdump", &objdump_path, "path",
+ "objdump binary to use for disassembly and annotations"),
OPT_END()
};
@@ -686,15 +689,19 @@ int cmd_report(int argc, const char **argv, const char *prefix __used)
if (strcmp(report.input_name, "-") != 0)
setup_browser(true);
- else
+ else {
use_browser = 0;
+ perf_hpp__init(false, false);
+ }
+
+ setup_sorting(report_usage, options);
/*
* Only in the newt browser we are doing integrated annotation,
* so don't allocate extra space that won't be used in the stdio
* implementation.
*/
- if (use_browser > 0) {
+ if (use_browser == 1 && sort__has_sym) {
symbol_conf.priv_size = sizeof(struct annotation);
report.annotate_init = symbol__annotate_init;
/*
@@ -717,8 +724,6 @@ int cmd_report(int argc, const char **argv, const char *prefix __used)
if (symbol__init() < 0)
goto error;
- setup_sorting(report_usage, options);
-
if (parent_pattern != default_parent_pattern) {
if (sort_dimension__add("parent") < 0)
goto error;
diff --git a/tools/perf/builtin-sched.c b/tools/perf/builtin-sched.c
index 7a9ad2b1ee7..9b9e32eaa80 100644
--- a/tools/perf/builtin-sched.c
+++ b/tools/perf/builtin-sched.c
@@ -23,31 +23,12 @@
#include <pthread.h>
#include <math.h>
-static const char *input_name;
-
-static char default_sort_order[] = "avg, max, switch, runtime";
-static const char *sort_order = default_sort_order;
-
-static int profile_cpu = -1;
-
#define PR_SET_NAME 15 /* Set process name */
#define MAX_CPUS 4096
-
-static u64 run_measurement_overhead;
-static u64 sleep_measurement_overhead;
-
#define COMM_LEN 20
#define SYM_LEN 129
-
#define MAX_PID 65536
-static unsigned long nr_tasks;
-
-struct perf_sched {
- struct perf_tool tool;
- struct perf_session *session;
-};
-
struct sched_atom;
struct task_desc {
@@ -85,44 +66,6 @@ struct sched_atom {
struct task_desc *wakee;
};
-static struct task_desc *pid_to_task[MAX_PID];
-
-static struct task_desc **tasks;
-
-static pthread_mutex_t start_work_mutex = PTHREAD_MUTEX_INITIALIZER;
-static u64 start_time;
-
-static pthread_mutex_t work_done_wait_mutex = PTHREAD_MUTEX_INITIALIZER;
-
-static unsigned long nr_run_events;
-static unsigned long nr_sleep_events;
-static unsigned long nr_wakeup_events;
-
-static unsigned long nr_sleep_corrections;
-static unsigned long nr_run_events_optimized;
-
-static unsigned long targetless_wakeups;
-static unsigned long multitarget_wakeups;
-
-static u64 cpu_usage;
-static u64 runavg_cpu_usage;
-static u64 parent_cpu_usage;
-static u64 runavg_parent_cpu_usage;
-
-static unsigned long nr_runs;
-static u64 sum_runtime;
-static u64 sum_fluct;
-static u64 run_avg;
-
-static unsigned int replay_repeat = 10;
-static unsigned long nr_timestamps;
-static unsigned long nr_unordered_timestamps;
-static unsigned long nr_state_machine_bugs;
-static unsigned long nr_context_switch_bugs;
-static unsigned long nr_events;
-static unsigned long nr_lost_chunks;
-static unsigned long nr_lost_events;
-
#define TASK_STATE_TO_CHAR_STR "RSDTtZX"
enum thread_state {
@@ -154,11 +97,79 @@ struct work_atoms {
typedef int (*sort_fn_t)(struct work_atoms *, struct work_atoms *);
-static struct rb_root atom_root, sorted_atom_root;
+struct perf_sched;
+
+struct trace_sched_handler {
+ int (*switch_event)(struct perf_sched *sched, struct perf_evsel *evsel,
+ struct perf_sample *sample, struct machine *machine);
-static u64 all_runtime;
-static u64 all_count;
+ int (*runtime_event)(struct perf_sched *sched, struct perf_evsel *evsel,
+ struct perf_sample *sample, struct machine *machine);
+ int (*wakeup_event)(struct perf_sched *sched, struct perf_evsel *evsel,
+ struct perf_sample *sample, struct machine *machine);
+
+ int (*fork_event)(struct perf_sched *sched, struct perf_evsel *evsel,
+ struct perf_sample *sample);
+
+ int (*migrate_task_event)(struct perf_sched *sched,
+ struct perf_evsel *evsel,
+ struct perf_sample *sample,
+ struct machine *machine);
+};
+
+struct perf_sched {
+ struct perf_tool tool;
+ const char *input_name;
+ const char *sort_order;
+ unsigned long nr_tasks;
+ struct task_desc *pid_to_task[MAX_PID];
+ struct task_desc **tasks;
+ const struct trace_sched_handler *tp_handler;
+ pthread_mutex_t start_work_mutex;
+ pthread_mutex_t work_done_wait_mutex;
+ int profile_cpu;
+/*
+ * Track the current task - that way we can tell whether there are any
+ * weird events, such as a task being switched away that is not the current one.
+ */
+ int max_cpu;
+ u32 curr_pid[MAX_CPUS];
+ struct thread *curr_thread[MAX_CPUS];
+ char next_shortname1;
+ char next_shortname2;
+ unsigned int replay_repeat;
+ unsigned long nr_run_events;
+ unsigned long nr_sleep_events;
+ unsigned long nr_wakeup_events;
+ unsigned long nr_sleep_corrections;
+ unsigned long nr_run_events_optimized;
+ unsigned long targetless_wakeups;
+ unsigned long multitarget_wakeups;
+ unsigned long nr_runs;
+ unsigned long nr_timestamps;
+ unsigned long nr_unordered_timestamps;
+ unsigned long nr_state_machine_bugs;
+ unsigned long nr_context_switch_bugs;
+ unsigned long nr_events;
+ unsigned long nr_lost_chunks;
+ unsigned long nr_lost_events;
+ u64 run_measurement_overhead;
+ u64 sleep_measurement_overhead;
+ u64 start_time;
+ u64 cpu_usage;
+ u64 runavg_cpu_usage;
+ u64 parent_cpu_usage;
+ u64 runavg_parent_cpu_usage;
+ u64 sum_runtime;
+ u64 sum_fluct;
+ u64 run_avg;
+ u64 all_runtime;
+ u64 all_count;
+ u64 cpu_last_switched[MAX_CPUS];
+ struct rb_root atom_root, sorted_atom_root;
+ struct list_head sort_list, cmp_pid;
+};
static u64 get_nsecs(void)
{
@@ -169,13 +180,13 @@ static u64 get_nsecs(void)
return ts.tv_sec * 1000000000ULL + ts.tv_nsec;
}
-static void burn_nsecs(u64 nsecs)
+static void burn_nsecs(struct perf_sched *sched, u64 nsecs)
{
u64 T0 = get_nsecs(), T1;
do {
T1 = get_nsecs();
- } while (T1 + run_measurement_overhead < T0 + nsecs);
+ } while (T1 + sched->run_measurement_overhead < T0 + nsecs);
}
static void sleep_nsecs(u64 nsecs)
@@ -188,24 +199,24 @@ static void sleep_nsecs(u64 nsecs)
nanosleep(&ts, NULL);
}
-static void calibrate_run_measurement_overhead(void)
+static void calibrate_run_measurement_overhead(struct perf_sched *sched)
{
u64 T0, T1, delta, min_delta = 1000000000ULL;
int i;
for (i = 0; i < 10; i++) {
T0 = get_nsecs();
- burn_nsecs(0);
+ burn_nsecs(sched, 0);
T1 = get_nsecs();
delta = T1-T0;
min_delta = min(min_delta, delta);
}
- run_measurement_overhead = min_delta;
+ sched->run_measurement_overhead = min_delta;
printf("run measurement overhead: %" PRIu64 " nsecs\n", min_delta);
}
-static void calibrate_sleep_measurement_overhead(void)
+static void calibrate_sleep_measurement_overhead(struct perf_sched *sched)
{
u64 T0, T1, delta, min_delta = 1000000000ULL;
int i;
@@ -218,7 +229,7 @@ static void calibrate_sleep_measurement_overhead(void)
min_delta = min(min_delta, delta);
}
min_delta -= 10000;
- sleep_measurement_overhead = min_delta;
+ sched->sleep_measurement_overhead = min_delta;
printf("sleep measurement overhead: %" PRIu64 " nsecs\n", min_delta);
}
@@ -251,8 +262,8 @@ static struct sched_atom *last_event(struct task_desc *task)
return task->atoms[task->nr_events - 1];
}
-static void
-add_sched_event_run(struct task_desc *task, u64 timestamp, u64 duration)
+static void add_sched_event_run(struct perf_sched *sched, struct task_desc *task,
+ u64 timestamp, u64 duration)
{
struct sched_atom *event, *curr_event = last_event(task);
@@ -261,7 +272,7 @@ add_sched_event_run(struct task_desc *task, u64 timestamp, u64 duration)
* to it:
*/
if (curr_event && curr_event->type == SCHED_EVENT_RUN) {
- nr_run_events_optimized++;
+ sched->nr_run_events_optimized++;
curr_event->duration += duration;
return;
}
@@ -271,12 +282,11 @@ add_sched_event_run(struct task_desc *task, u64 timestamp, u64 duration)
event->type = SCHED_EVENT_RUN;
event->duration = duration;
- nr_run_events++;
+ sched->nr_run_events++;
}
-static void
-add_sched_event_wakeup(struct task_desc *task, u64 timestamp,
- struct task_desc *wakee)
+static void add_sched_event_wakeup(struct perf_sched *sched, struct task_desc *task,
+ u64 timestamp, struct task_desc *wakee)
{
struct sched_atom *event, *wakee_event;
@@ -286,11 +296,11 @@ add_sched_event_wakeup(struct task_desc *task, u64 timestamp,
wakee_event = last_event(wakee);
if (!wakee_event || wakee_event->type != SCHED_EVENT_SLEEP) {
- targetless_wakeups++;
+ sched->targetless_wakeups++;
return;
}
if (wakee_event->wait_sem) {
- multitarget_wakeups++;
+ sched->multitarget_wakeups++;
return;
}
@@ -299,89 +309,89 @@ add_sched_event_wakeup(struct task_desc *task, u64 timestamp,
wakee_event->specific_wait = 1;
event->wait_sem = wakee_event->wait_sem;
- nr_wakeup_events++;
+ sched->nr_wakeup_events++;
}
-static void
-add_sched_event_sleep(struct task_desc *task, u64 timestamp,
- u64 task_state __used)
+static void add_sched_event_sleep(struct perf_sched *sched, struct task_desc *task,
+ u64 timestamp, u64 task_state __maybe_unused)
{
struct sched_atom *event = get_new_event(task, timestamp);
event->type = SCHED_EVENT_SLEEP;
- nr_sleep_events++;
+ sched->nr_sleep_events++;
}
-static struct task_desc *register_pid(unsigned long pid, const char *comm)
+static struct task_desc *register_pid(struct perf_sched *sched,
+ unsigned long pid, const char *comm)
{
struct task_desc *task;
BUG_ON(pid >= MAX_PID);
- task = pid_to_task[pid];
+ task = sched->pid_to_task[pid];
if (task)
return task;
task = zalloc(sizeof(*task));
task->pid = pid;
- task->nr = nr_tasks;
+ task->nr = sched->nr_tasks;
strcpy(task->comm, comm);
/*
* every task starts in sleeping state - this gets ignored
* if there's no wakeup pointing to this sleep state:
*/
- add_sched_event_sleep(task, 0, 0);
+ add_sched_event_sleep(sched, task, 0, 0);
- pid_to_task[pid] = task;
- nr_tasks++;
- tasks = realloc(tasks, nr_tasks*sizeof(struct task_task *));
- BUG_ON(!tasks);
- tasks[task->nr] = task;
+ sched->pid_to_task[pid] = task;
+ sched->nr_tasks++;
+ sched->tasks = realloc(sched->tasks, sched->nr_tasks * sizeof(struct task_desc *));
+ BUG_ON(!sched->tasks);
+ sched->tasks[task->nr] = task;
if (verbose)
- printf("registered task #%ld, PID %ld (%s)\n", nr_tasks, pid, comm);
+ printf("registered task #%ld, PID %ld (%s)\n", sched->nr_tasks, pid, comm);
return task;
}
-static void print_task_traces(void)
+static void print_task_traces(struct perf_sched *sched)
{
struct task_desc *task;
unsigned long i;
- for (i = 0; i < nr_tasks; i++) {
- task = tasks[i];
+ for (i = 0; i < sched->nr_tasks; i++) {
+ task = sched->tasks[i];
printf("task %6ld (%20s:%10ld), nr_events: %ld\n",
task->nr, task->comm, task->pid, task->nr_events);
}
}
-static void add_cross_task_wakeups(void)
+static void add_cross_task_wakeups(struct perf_sched *sched)
{
struct task_desc *task1, *task2;
unsigned long i, j;
- for (i = 0; i < nr_tasks; i++) {
- task1 = tasks[i];
+ for (i = 0; i < sched->nr_tasks; i++) {
+ task1 = sched->tasks[i];
j = i + 1;
- if (j == nr_tasks)
+ if (j == sched->nr_tasks)
j = 0;
- task2 = tasks[j];
- add_sched_event_wakeup(task1, 0, task2);
+ task2 = sched->tasks[j];
+ add_sched_event_wakeup(sched, task1, 0, task2);
}
}
-static void
-process_sched_event(struct task_desc *this_task __used, struct sched_atom *atom)
+static void perf_sched__process_event(struct perf_sched *sched,
+ struct sched_atom *atom)
{
int ret = 0;
switch (atom->type) {
case SCHED_EVENT_RUN:
- burn_nsecs(atom->duration);
+ burn_nsecs(sched, atom->duration);
break;
case SCHED_EVENT_SLEEP:
if (atom->wait_sem)
@@ -428,8 +438,8 @@ static int self_open_counters(void)
fd = sys_perf_event_open(&attr, 0, -1, -1, 0);
if (fd < 0)
- die("Error: sys_perf_event_open() syscall returned"
- "with %d (%s)\n", fd, strerror(errno));
+ pr_err("Error: sys_perf_event_open() syscall returned "
+ "with %d (%s)\n", fd, strerror(errno));
return fd;
}
@@ -444,31 +454,41 @@ static u64 get_cpu_usage_nsec_self(int fd)
return runtime;
}
+struct sched_thread_parms {
+ struct task_desc *task;
+ struct perf_sched *sched;
+};
+
static void *thread_func(void *ctx)
{
- struct task_desc *this_task = ctx;
+ struct sched_thread_parms *parms = ctx;
+ struct task_desc *this_task = parms->task;
+ struct perf_sched *sched = parms->sched;
u64 cpu_usage_0, cpu_usage_1;
unsigned long i, ret;
char comm2[22];
int fd;
+ free(parms);
+
sprintf(comm2, ":%s", this_task->comm);
prctl(PR_SET_NAME, comm2);
fd = self_open_counters();
-
+ if (fd < 0)
+ return NULL;
again:
ret = sem_post(&this_task->ready_for_work);
BUG_ON(ret);
- ret = pthread_mutex_lock(&start_work_mutex);
+ ret = pthread_mutex_lock(&sched->start_work_mutex);
BUG_ON(ret);
- ret = pthread_mutex_unlock(&start_work_mutex);
+ ret = pthread_mutex_unlock(&sched->start_work_mutex);
BUG_ON(ret);
cpu_usage_0 = get_cpu_usage_nsec_self(fd);
for (i = 0; i < this_task->nr_events; i++) {
this_task->curr_event = i;
- process_sched_event(this_task, this_task->atoms[i]);
+ perf_sched__process_event(sched, this_task->atoms[i]);
}
cpu_usage_1 = get_cpu_usage_nsec_self(fd);
@@ -476,15 +496,15 @@ again:
ret = sem_post(&this_task->work_done_sem);
BUG_ON(ret);
- ret = pthread_mutex_lock(&work_done_wait_mutex);
+ ret = pthread_mutex_lock(&sched->work_done_wait_mutex);
BUG_ON(ret);
- ret = pthread_mutex_unlock(&work_done_wait_mutex);
+ ret = pthread_mutex_unlock(&sched->work_done_wait_mutex);
BUG_ON(ret);
goto again;
}
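thread_func() above receives a heap-allocated sched_thread_parms carrying both the task and the shared perf_sched state; the thread copies the fields out and frees the block itself, so the creating side never has to track it. A minimal sketch of the same hand-off with generic names:

#include <pthread.h>
#include <stdlib.h>

struct parms {
	int id;
	void *shared;
};

static void *worker(void *ctx)
{
	struct parms p = *(struct parms *)ctx;	/* copy out, then release */

	free(ctx);
	/* ... p.id and p.shared stay valid for the thread's lifetime ... */
	return NULL;
}

static int spawn(pthread_t *t, int id, void *shared)
{
	struct parms *p = malloc(sizeof(*p));

	if (!p)
		return -1;
	p->id = id;
	p->shared = shared;
	return pthread_create(t, NULL, worker, p);
}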
-static void create_tasks(void)
+static void create_tasks(struct perf_sched *sched)
{
struct task_desc *task;
pthread_attr_t attr;
@@ -496,128 +516,129 @@ static void create_tasks(void)
err = pthread_attr_setstacksize(&attr,
(size_t) max(16 * 1024, PTHREAD_STACK_MIN));
BUG_ON(err);
- err = pthread_mutex_lock(&start_work_mutex);
+ err = pthread_mutex_lock(&sched->start_work_mutex);
BUG_ON(err);
- err = pthread_mutex_lock(&work_done_wait_mutex);
+ err = pthread_mutex_lock(&sched->work_done_wait_mutex);
BUG_ON(err);
- for (i = 0; i < nr_tasks; i++) {
- task = tasks[i];
+ for (i = 0; i < sched->nr_tasks; i++) {
+ struct sched_thread_parms *parms = malloc(sizeof(*parms));
+ BUG_ON(parms == NULL);
+ parms->task = task = sched->tasks[i];
+ parms->sched = sched;
sem_init(&task->sleep_sem, 0, 0);
sem_init(&task->ready_for_work, 0, 0);
sem_init(&task->work_done_sem, 0, 0);
task->curr_event = 0;
- err = pthread_create(&task->thread, &attr, thread_func, task);
+ err = pthread_create(&task->thread, &attr, thread_func, parms);
BUG_ON(err);
}
}
-static void wait_for_tasks(void)
+static void wait_for_tasks(struct perf_sched *sched)
{
u64 cpu_usage_0, cpu_usage_1;
struct task_desc *task;
unsigned long i, ret;
- start_time = get_nsecs();
- cpu_usage = 0;
- pthread_mutex_unlock(&work_done_wait_mutex);
+ sched->start_time = get_nsecs();
+ sched->cpu_usage = 0;
+ pthread_mutex_unlock(&sched->work_done_wait_mutex);
- for (i = 0; i < nr_tasks; i++) {
- task = tasks[i];
+ for (i = 0; i < sched->nr_tasks; i++) {
+ task = sched->tasks[i];
ret = sem_wait(&task->ready_for_work);
BUG_ON(ret);
sem_init(&task->ready_for_work, 0, 0);
}
- ret = pthread_mutex_lock(&work_done_wait_mutex);
+ ret = pthread_mutex_lock(&sched->work_done_wait_mutex);
BUG_ON(ret);
cpu_usage_0 = get_cpu_usage_nsec_parent();
- pthread_mutex_unlock(&start_work_mutex);
+ pthread_mutex_unlock(&sched->start_work_mutex);
- for (i = 0; i < nr_tasks; i++) {
- task = tasks[i];
+ for (i = 0; i < sched->nr_tasks; i++) {
+ task = sched->tasks[i];
ret = sem_wait(&task->work_done_sem);
BUG_ON(ret);
sem_init(&task->work_done_sem, 0, 0);
- cpu_usage += task->cpu_usage;
+ sched->cpu_usage += task->cpu_usage;
task->cpu_usage = 0;
}
cpu_usage_1 = get_cpu_usage_nsec_parent();
- if (!runavg_cpu_usage)
- runavg_cpu_usage = cpu_usage;
- runavg_cpu_usage = (runavg_cpu_usage*9 + cpu_usage)/10;
+ if (!sched->runavg_cpu_usage)
+ sched->runavg_cpu_usage = sched->cpu_usage;
+ sched->runavg_cpu_usage = (sched->runavg_cpu_usage * 9 + sched->cpu_usage) / 10;
- parent_cpu_usage = cpu_usage_1 - cpu_usage_0;
- if (!runavg_parent_cpu_usage)
- runavg_parent_cpu_usage = parent_cpu_usage;
- runavg_parent_cpu_usage = (runavg_parent_cpu_usage*9 +
- parent_cpu_usage)/10;
+ sched->parent_cpu_usage = cpu_usage_1 - cpu_usage_0;
+ if (!sched->runavg_parent_cpu_usage)
+ sched->runavg_parent_cpu_usage = sched->parent_cpu_usage;
+ sched->runavg_parent_cpu_usage = (sched->runavg_parent_cpu_usage * 9 +
+ sched->parent_cpu_usage)/10;
- ret = pthread_mutex_lock(&start_work_mutex);
+ ret = pthread_mutex_lock(&sched->start_work_mutex);
BUG_ON(ret);
- for (i = 0; i < nr_tasks; i++) {
- task = tasks[i];
+ for (i = 0; i < sched->nr_tasks; i++) {
+ task = sched->tasks[i];
sem_init(&task->sleep_sem, 0, 0);
task->curr_event = 0;
}
}
-static void run_one_test(void)
+static void run_one_test(struct perf_sched *sched)
{
u64 T0, T1, delta, avg_delta, fluct;
T0 = get_nsecs();
- wait_for_tasks();
+ wait_for_tasks(sched);
T1 = get_nsecs();
delta = T1 - T0;
- sum_runtime += delta;
- nr_runs++;
+ sched->sum_runtime += delta;
+ sched->nr_runs++;
- avg_delta = sum_runtime / nr_runs;
+ avg_delta = sched->sum_runtime / sched->nr_runs;
if (delta < avg_delta)
fluct = avg_delta - delta;
else
fluct = delta - avg_delta;
- sum_fluct += fluct;
- if (!run_avg)
- run_avg = delta;
- run_avg = (run_avg*9 + delta)/10;
+ sched->sum_fluct += fluct;
+ if (!sched->run_avg)
+ sched->run_avg = delta;
+ sched->run_avg = (sched->run_avg * 9 + delta) / 10;
- printf("#%-3ld: %0.3f, ",
- nr_runs, (double)delta/1000000.0);
+ printf("#%-3ld: %0.3f, ", sched->nr_runs, (double)delta / 1000000.0);
- printf("ravg: %0.2f, ",
- (double)run_avg/1e6);
+ printf("ravg: %0.2f, ", (double)sched->run_avg / 1e6);
printf("cpu: %0.2f / %0.2f",
- (double)cpu_usage/1e6, (double)runavg_cpu_usage/1e6);
+ (double)sched->cpu_usage / 1e6, (double)sched->runavg_cpu_usage / 1e6);
#if 0
/*
* rusage statistics done by the parent; these are less
- * accurate than the sum_exec_runtime based statistics:
+ * accurate than the sched->sum_exec_runtime based statistics:
*/
printf(" [%0.2f / %0.2f]",
- (double)parent_cpu_usage/1e6,
- (double)runavg_parent_cpu_usage/1e6);
+ (double)sched->parent_cpu_usage / 1e6,
+ (double)sched->runavg_parent_cpu_usage / 1e6);
#endif
printf("\n");
- if (nr_sleep_corrections)
- printf(" (%ld sleep corrections)\n", nr_sleep_corrections);
- nr_sleep_corrections = 0;
+ if (sched->nr_sleep_corrections)
+ printf(" (%ld sleep corrections)\n", sched->nr_sleep_corrections);
+ sched->nr_sleep_corrections = 0;
}
-static void test_calibrations(void)
+static void test_calibrations(struct perf_sched *sched)
{
u64 T0, T1;
T0 = get_nsecs();
- burn_nsecs(1e6);
+ burn_nsecs(sched, 1e6);
T1 = get_nsecs();
printf("the run test took %" PRIu64 " nsecs\n", T1 - T0);
@@ -629,236 +650,92 @@ static void test_calibrations(void)
printf("the sleep test took %" PRIu64 " nsecs\n", T1 - T0);
}
-#define FILL_FIELD(ptr, field, event, data) \
- ptr.field = (typeof(ptr.field)) raw_field_value(event, #field, data)
-
-#define FILL_ARRAY(ptr, array, event, data) \
-do { \
- void *__array = raw_field_ptr(event, #array, data); \
- memcpy(ptr.array, __array, sizeof(ptr.array)); \
-} while(0)
-
-#define FILL_COMMON_FIELDS(ptr, event, data) \
-do { \
- FILL_FIELD(ptr, common_type, event, data); \
- FILL_FIELD(ptr, common_flags, event, data); \
- FILL_FIELD(ptr, common_preempt_count, event, data); \
- FILL_FIELD(ptr, common_pid, event, data); \
- FILL_FIELD(ptr, common_tgid, event, data); \
-} while (0)
-
-
-
-struct trace_switch_event {
- u32 size;
-
- u16 common_type;
- u8 common_flags;
- u8 common_preempt_count;
- u32 common_pid;
- u32 common_tgid;
-
- char prev_comm[16];
- u32 prev_pid;
- u32 prev_prio;
- u64 prev_state;
- char next_comm[16];
- u32 next_pid;
- u32 next_prio;
-};
-
-struct trace_runtime_event {
- u32 size;
-
- u16 common_type;
- u8 common_flags;
- u8 common_preempt_count;
- u32 common_pid;
- u32 common_tgid;
-
- char comm[16];
- u32 pid;
- u64 runtime;
- u64 vruntime;
-};
-
-struct trace_wakeup_event {
- u32 size;
-
- u16 common_type;
- u8 common_flags;
- u8 common_preempt_count;
- u32 common_pid;
- u32 common_tgid;
-
- char comm[16];
- u32 pid;
-
- u32 prio;
- u32 success;
- u32 cpu;
-};
-
-struct trace_fork_event {
- u32 size;
-
- u16 common_type;
- u8 common_flags;
- u8 common_preempt_count;
- u32 common_pid;
- u32 common_tgid;
-
- char parent_comm[16];
- u32 parent_pid;
- char child_comm[16];
- u32 child_pid;
-};
-
-struct trace_migrate_task_event {
- u32 size;
-
- u16 common_type;
- u8 common_flags;
- u8 common_preempt_count;
- u32 common_pid;
- u32 common_tgid;
-
- char comm[16];
- u32 pid;
-
- u32 prio;
- u32 cpu;
-};
-
-struct trace_sched_handler {
- void (*switch_event)(struct trace_switch_event *,
- struct machine *,
- struct event_format *,
- int cpu,
- u64 timestamp,
- struct thread *thread);
-
- void (*runtime_event)(struct trace_runtime_event *,
- struct machine *,
- struct event_format *,
- int cpu,
- u64 timestamp,
- struct thread *thread);
-
- void (*wakeup_event)(struct trace_wakeup_event *,
- struct machine *,
- struct event_format *,
- int cpu,
- u64 timestamp,
- struct thread *thread);
-
- void (*fork_event)(struct trace_fork_event *,
- struct event_format *,
- int cpu,
- u64 timestamp,
- struct thread *thread);
-
- void (*migrate_task_event)(struct trace_migrate_task_event *,
- struct machine *machine,
- struct event_format *,
- int cpu,
- u64 timestamp,
- struct thread *thread);
-};
-
-
-static void
-replay_wakeup_event(struct trace_wakeup_event *wakeup_event,
- struct machine *machine __used,
- struct event_format *event,
- int cpu __used,
- u64 timestamp __used,
- struct thread *thread __used)
+static int
+replay_wakeup_event(struct perf_sched *sched,
+ struct perf_evsel *evsel, struct perf_sample *sample,
+ struct machine *machine __maybe_unused)
{
+ const char *comm = perf_evsel__strval(evsel, sample, "comm");
+ const u32 pid = perf_evsel__intval(evsel, sample, "pid");
struct task_desc *waker, *wakee;
if (verbose) {
- printf("sched_wakeup event %p\n", event);
+ printf("sched_wakeup event %p\n", evsel);
- printf(" ... pid %d woke up %s/%d\n",
- wakeup_event->common_pid,
- wakeup_event->comm,
- wakeup_event->pid);
+ printf(" ... pid %d woke up %s/%d\n", sample->tid, comm, pid);
}
- waker = register_pid(wakeup_event->common_pid, "<unknown>");
- wakee = register_pid(wakeup_event->pid, wakeup_event->comm);
+ waker = register_pid(sched, sample->tid, "<unknown>");
+ wakee = register_pid(sched, pid, comm);
- add_sched_event_wakeup(waker, timestamp, wakee);
+ add_sched_event_wakeup(sched, waker, sample->time, wakee);
+ return 0;
}
-static u64 cpu_last_switched[MAX_CPUS];
-
-static void
-replay_switch_event(struct trace_switch_event *switch_event,
- struct machine *machine __used,
- struct event_format *event,
- int cpu,
- u64 timestamp,
- struct thread *thread __used)
+static int replay_switch_event(struct perf_sched *sched,
+ struct perf_evsel *evsel,
+ struct perf_sample *sample,
+ struct machine *machine __maybe_unused)
{
- struct task_desc *prev, __used *next;
- u64 timestamp0;
+ const char *prev_comm = perf_evsel__strval(evsel, sample, "prev_comm"),
+ *next_comm = perf_evsel__strval(evsel, sample, "next_comm");
+ const u32 prev_pid = perf_evsel__intval(evsel, sample, "prev_pid"),
+ next_pid = perf_evsel__intval(evsel, sample, "next_pid");
+ const u64 prev_state = perf_evsel__intval(evsel, sample, "prev_state");
+ struct task_desc *prev, __maybe_unused *next;
+ u64 timestamp0, timestamp = sample->time;
+ int cpu = sample->cpu;
s64 delta;
if (verbose)
- printf("sched_switch event %p\n", event);
+ printf("sched_switch event %p\n", evsel);
if (cpu >= MAX_CPUS || cpu < 0)
- return;
+ return 0;
- timestamp0 = cpu_last_switched[cpu];
+ timestamp0 = sched->cpu_last_switched[cpu];
if (timestamp0)
delta = timestamp - timestamp0;
else
delta = 0;
- if (delta < 0)
- die("hm, delta: %" PRIu64 " < 0 ?\n", delta);
-
- if (verbose) {
- printf(" ... switch from %s/%d to %s/%d [ran %" PRIu64 " nsecs]\n",
- switch_event->prev_comm, switch_event->prev_pid,
- switch_event->next_comm, switch_event->next_pid,
- delta);
+ if (delta < 0) {
+ pr_err("hm, delta: %" PRIu64 " < 0 ?\n", delta);
+ return -1;
}
- prev = register_pid(switch_event->prev_pid, switch_event->prev_comm);
- next = register_pid(switch_event->next_pid, switch_event->next_comm);
+ pr_debug(" ... switch from %s/%d to %s/%d [ran %" PRIu64 " nsecs]\n",
+ prev_comm, prev_pid, next_comm, next_pid, delta);
- cpu_last_switched[cpu] = timestamp;
+ prev = register_pid(sched, prev_pid, prev_comm);
+ next = register_pid(sched, next_pid, next_comm);
- add_sched_event_run(prev, timestamp, delta);
- add_sched_event_sleep(prev, timestamp, switch_event->prev_state);
-}
+ sched->cpu_last_switched[cpu] = timestamp;
+ add_sched_event_run(sched, prev, timestamp, delta);
+ add_sched_event_sleep(sched, prev, timestamp, prev_state);
-static void
-replay_fork_event(struct trace_fork_event *fork_event,
- struct event_format *event,
- int cpu __used,
- u64 timestamp __used,
- struct thread *thread __used)
+ return 0;
+}
+
+static int replay_fork_event(struct perf_sched *sched, struct perf_evsel *evsel,
+ struct perf_sample *sample)
{
+ const char *parent_comm = perf_evsel__strval(evsel, sample, "parent_comm"),
+ *child_comm = perf_evsel__strval(evsel, sample, "child_comm");
+ const u32 parent_pid = perf_evsel__intval(evsel, sample, "parent_pid"),
+ child_pid = perf_evsel__intval(evsel, sample, "child_pid");
+
if (verbose) {
- printf("sched_fork event %p\n", event);
- printf("... parent: %s/%d\n", fork_event->parent_comm, fork_event->parent_pid);
- printf("... child: %s/%d\n", fork_event->child_comm, fork_event->child_pid);
+ printf("sched_fork event %p\n", evsel);
+ printf("... parent: %s/%d\n", parent_comm, parent_pid);
+ printf("... child: %s/%d\n", child_comm, child_pid);
}
- register_pid(fork_event->parent_pid, fork_event->parent_comm);
- register_pid(fork_event->child_pid, fork_event->child_comm);
-}
-static struct trace_sched_handler replay_ops = {
- .wakeup_event = replay_wakeup_event,
- .switch_event = replay_switch_event,
- .fork_event = replay_fork_event,
-};
+ register_pid(sched, parent_pid, parent_comm);
+ register_pid(sched, child_pid, child_comm);
+ return 0;
+}
struct sort_dimension {
const char *name;
@@ -866,8 +743,6 @@ struct sort_dimension {
struct list_head list;
};
-static LIST_HEAD(cmp_pid);
-
static int
thread_lat_cmp(struct list_head *list, struct work_atoms *l, struct work_atoms *r)
{
@@ -936,43 +811,45 @@ __thread_latency_insert(struct rb_root *root, struct work_atoms *data,
rb_insert_color(&data->node, root);
}
-static void thread_atoms_insert(struct thread *thread)
+static int thread_atoms_insert(struct perf_sched *sched, struct thread *thread)
{
struct work_atoms *atoms = zalloc(sizeof(*atoms));
- if (!atoms)
- die("No memory");
+ if (!atoms) {
+ pr_err("No memory at %s\n", __func__);
+ return -1;
+ }
atoms->thread = thread;
INIT_LIST_HEAD(&atoms->work_list);
- __thread_latency_insert(&atom_root, atoms, &cmp_pid);
+ __thread_latency_insert(&sched->atom_root, atoms, &sched->cmp_pid);
+ return 0;
}
-static void
-latency_fork_event(struct trace_fork_event *fork_event __used,
- struct event_format *event __used,
- int cpu __used,
- u64 timestamp __used,
- struct thread *thread __used)
+static int latency_fork_event(struct perf_sched *sched __maybe_unused,
+ struct perf_evsel *evsel __maybe_unused,
+ struct perf_sample *sample __maybe_unused)
{
/* should insert the newcomer */
+ return 0;
}
-__used
-static char sched_out_state(struct trace_switch_event *switch_event)
+static char sched_out_state(u64 prev_state)
{
const char *str = TASK_STATE_TO_CHAR_STR;
- return str[switch_event->prev_state];
+ return str[prev_state];
}
-static void
+static int
add_sched_out_event(struct work_atoms *atoms,
char run_state,
u64 timestamp)
{
struct work_atom *atom = zalloc(sizeof(*atom));
- if (!atom)
- die("Non memory");
+ if (!atom) {
+ pr_err("Non memory at %s", __func__);
+ return -1;
+ }
atom->sched_out_time = timestamp;
@@ -982,10 +859,12 @@ add_sched_out_event(struct work_atoms *atoms,
}
list_add_tail(&atom->list, &atoms->work_list);
+ return 0;
}
static void
-add_runtime_event(struct work_atoms *atoms, u64 delta, u64 timestamp __used)
+add_runtime_event(struct work_atoms *atoms, u64 delta,
+ u64 timestamp __maybe_unused)
{
struct work_atom *atom;
@@ -1028,106 +907,128 @@ add_sched_in_event(struct work_atoms *atoms, u64 timestamp)
atoms->nb_atoms++;
}
-static void
-latency_switch_event(struct trace_switch_event *switch_event,
- struct machine *machine,
- struct event_format *event __used,
- int cpu,
- u64 timestamp,
- struct thread *thread __used)
+static int latency_switch_event(struct perf_sched *sched,
+ struct perf_evsel *evsel,
+ struct perf_sample *sample,
+ struct machine *machine)
{
+ const u32 prev_pid = perf_evsel__intval(evsel, sample, "prev_pid"),
+ next_pid = perf_evsel__intval(evsel, sample, "next_pid");
+ const u64 prev_state = perf_evsel__intval(evsel, sample, "prev_state");
struct work_atoms *out_events, *in_events;
struct thread *sched_out, *sched_in;
- u64 timestamp0;
+ u64 timestamp0, timestamp = sample->time;
+ int cpu = sample->cpu;
s64 delta;
BUG_ON(cpu >= MAX_CPUS || cpu < 0);
- timestamp0 = cpu_last_switched[cpu];
- cpu_last_switched[cpu] = timestamp;
+ timestamp0 = sched->cpu_last_switched[cpu];
+ sched->cpu_last_switched[cpu] = timestamp;
if (timestamp0)
delta = timestamp - timestamp0;
else
delta = 0;
- if (delta < 0)
- die("hm, delta: %" PRIu64 " < 0 ?\n", delta);
-
+ if (delta < 0) {
+ pr_err("hm, delta: %" PRIu64 " < 0 ?\n", delta);
+ return -1;
+ }
- sched_out = machine__findnew_thread(machine, switch_event->prev_pid);
- sched_in = machine__findnew_thread(machine, switch_event->next_pid);
+ sched_out = machine__findnew_thread(machine, prev_pid);
+ sched_in = machine__findnew_thread(machine, next_pid);
- out_events = thread_atoms_search(&atom_root, sched_out, &cmp_pid);
+ out_events = thread_atoms_search(&sched->atom_root, sched_out, &sched->cmp_pid);
if (!out_events) {
- thread_atoms_insert(sched_out);
- out_events = thread_atoms_search(&atom_root, sched_out, &cmp_pid);
- if (!out_events)
- die("out-event: Internal tree error");
+ if (thread_atoms_insert(sched, sched_out))
+ return -1;
+ out_events = thread_atoms_search(&sched->atom_root, sched_out, &sched->cmp_pid);
+ if (!out_events) {
+ pr_err("out-event: Internal tree error");
+ return -1;
+ }
}
- add_sched_out_event(out_events, sched_out_state(switch_event), timestamp);
+ if (add_sched_out_event(out_events, sched_out_state(prev_state), timestamp))
+ return -1;
- in_events = thread_atoms_search(&atom_root, sched_in, &cmp_pid);
+ in_events = thread_atoms_search(&sched->atom_root, sched_in, &sched->cmp_pid);
if (!in_events) {
- thread_atoms_insert(sched_in);
- in_events = thread_atoms_search(&atom_root, sched_in, &cmp_pid);
- if (!in_events)
- die("in-event: Internal tree error");
+ if (thread_atoms_insert(sched, sched_in))
+ return -1;
+ in_events = thread_atoms_search(&sched->atom_root, sched_in, &sched->cmp_pid);
+ if (!in_events) {
+ pr_err("in-event: Internal tree error");
+ return -1;
+ }
/*
* Task came in that we have not heard about yet,
* add in an initial atom in runnable state:
*/
- add_sched_out_event(in_events, 'R', timestamp);
+ if (add_sched_out_event(in_events, 'R', timestamp))
+ return -1;
}
add_sched_in_event(in_events, timestamp);
+
+ return 0;
}
-static void
-latency_runtime_event(struct trace_runtime_event *runtime_event,
- struct machine *machine,
- struct event_format *event __used,
- int cpu,
- u64 timestamp,
- struct thread *this_thread __used)
+static int latency_runtime_event(struct perf_sched *sched,
+ struct perf_evsel *evsel,
+ struct perf_sample *sample,
+ struct machine *machine)
{
- struct thread *thread = machine__findnew_thread(machine, runtime_event->pid);
- struct work_atoms *atoms = thread_atoms_search(&atom_root, thread, &cmp_pid);
+ const u32 pid = perf_evsel__intval(evsel, sample, "pid");
+ const u64 runtime = perf_evsel__intval(evsel, sample, "runtime");
+ struct thread *thread = machine__findnew_thread(machine, pid);
+ struct work_atoms *atoms = thread_atoms_search(&sched->atom_root, thread, &sched->cmp_pid);
+ u64 timestamp = sample->time;
+ int cpu = sample->cpu;
BUG_ON(cpu >= MAX_CPUS || cpu < 0);
if (!atoms) {
- thread_atoms_insert(thread);
- atoms = thread_atoms_search(&atom_root, thread, &cmp_pid);
- if (!atoms)
- die("in-event: Internal tree error");
- add_sched_out_event(atoms, 'R', timestamp);
+ if (thread_atoms_insert(sched, thread))
+ return -1;
+ atoms = thread_atoms_search(&sched->atom_root, thread, &sched->cmp_pid);
+ if (!atoms) {
+ pr_err("in-event: Internal tree error");
+ return -1;
+ }
+ if (add_sched_out_event(atoms, 'R', timestamp))
+ return -1;
}
- add_runtime_event(atoms, runtime_event->runtime, timestamp);
+ add_runtime_event(atoms, runtime, timestamp);
+ return 0;
}
-static void
-latency_wakeup_event(struct trace_wakeup_event *wakeup_event,
- struct machine *machine,
- struct event_format *__event __used,
- int cpu __used,
- u64 timestamp,
- struct thread *thread __used)
+static int latency_wakeup_event(struct perf_sched *sched,
+ struct perf_evsel *evsel,
+ struct perf_sample *sample,
+ struct machine *machine)
{
+ const u32 pid = perf_evsel__intval(evsel, sample, "pid"),
+ success = perf_evsel__intval(evsel, sample, "success");
struct work_atoms *atoms;
struct work_atom *atom;
struct thread *wakee;
+ u64 timestamp = sample->time;
/* Note for later, it may be interesting to observe the failing cases */
- if (!wakeup_event->success)
- return;
+ if (!success)
+ return 0;
- wakee = machine__findnew_thread(machine, wakeup_event->pid);
- atoms = thread_atoms_search(&atom_root, wakee, &cmp_pid);
+ wakee = machine__findnew_thread(machine, pid);
+ atoms = thread_atoms_search(&sched->atom_root, wakee, &sched->cmp_pid);
if (!atoms) {
- thread_atoms_insert(wakee);
- atoms = thread_atoms_search(&atom_root, wakee, &cmp_pid);
- if (!atoms)
- die("wakeup-event: Internal tree error");
- add_sched_out_event(atoms, 'S', timestamp);
+ if (thread_atoms_insert(sched, wakee))
+ return -1;
+ atoms = thread_atoms_search(&sched->atom_root, wakee, &sched->cmp_pid);
+ if (!atoms) {
+ pr_err("wakeup-event: Internal tree error");
+ return -1;
+ }
+ if (add_sched_out_event(atoms, 'S', timestamp))
+ return -1;
}
BUG_ON(list_empty(&atoms->work_list));
@@ -1139,27 +1040,27 @@ latency_wakeup_event(struct trace_wakeup_event *wakeup_event,
* one CPU, or are looking at only one, so don't
* make useless noise.
*/
- if (profile_cpu == -1 && atom->state != THREAD_SLEEPING)
- nr_state_machine_bugs++;
+ if (sched->profile_cpu == -1 && atom->state != THREAD_SLEEPING)
+ sched->nr_state_machine_bugs++;
- nr_timestamps++;
+ sched->nr_timestamps++;
if (atom->sched_out_time > timestamp) {
- nr_unordered_timestamps++;
- return;
+ sched->nr_unordered_timestamps++;
+ return 0;
}
atom->state = THREAD_WAIT_CPU;
atom->wake_up_time = timestamp;
+ return 0;
}
-static void
-latency_migrate_task_event(struct trace_migrate_task_event *migrate_task_event,
- struct machine *machine,
- struct event_format *__event __used,
- int cpu __used,
- u64 timestamp,
- struct thread *thread __used)
+static int latency_migrate_task_event(struct perf_sched *sched,
+ struct perf_evsel *evsel,
+ struct perf_sample *sample,
+ struct machine *machine)
{
+ const u32 pid = perf_evsel__intval(evsel, sample, "pid");
+ u64 timestamp = sample->time;
struct work_atoms *atoms;
struct work_atom *atom;
struct thread *migrant;
@@ -1167,18 +1068,22 @@ latency_migrate_task_event(struct trace_migrate_task_event *migrate_task_event,
/*
* Only need to worry about migration when profiling one CPU.
*/
- if (profile_cpu == -1)
- return;
+ if (sched->profile_cpu == -1)
+ return 0;
- migrant = machine__findnew_thread(machine, migrate_task_event->pid);
- atoms = thread_atoms_search(&atom_root, migrant, &cmp_pid);
+ migrant = machine__findnew_thread(machine, pid);
+ atoms = thread_atoms_search(&sched->atom_root, migrant, &sched->cmp_pid);
if (!atoms) {
- thread_atoms_insert(migrant);
- register_pid(migrant->pid, migrant->comm);
- atoms = thread_atoms_search(&atom_root, migrant, &cmp_pid);
- if (!atoms)
- die("migration-event: Internal tree error");
- add_sched_out_event(atoms, 'R', timestamp);
+ if (thread_atoms_insert(sched, migrant))
+ return -1;
+ register_pid(sched, migrant->pid, migrant->comm);
+ atoms = thread_atoms_search(&sched->atom_root, migrant, &sched->cmp_pid);
+ if (!atoms) {
+ pr_err("migration-event: Internal tree error");
+ return -1;
+ }
+ if (add_sched_out_event(atoms, 'R', timestamp))
+ return -1;
}
BUG_ON(list_empty(&atoms->work_list));
@@ -1186,21 +1091,15 @@ latency_migrate_task_event(struct trace_migrate_task_event *migrate_task_event,
atom = list_entry(atoms->work_list.prev, struct work_atom, list);
atom->sched_in_time = atom->sched_out_time = atom->wake_up_time = timestamp;
- nr_timestamps++;
+ sched->nr_timestamps++;
if (atom->sched_out_time > timestamp)
- nr_unordered_timestamps++;
-}
+ sched->nr_unordered_timestamps++;
-static struct trace_sched_handler lat_ops = {
- .wakeup_event = latency_wakeup_event,
- .switch_event = latency_switch_event,
- .runtime_event = latency_runtime_event,
- .fork_event = latency_fork_event,
- .migrate_task_event = latency_migrate_task_event,
-};
+ return 0;
+}
-static void output_lat_thread(struct work_atoms *work_list)
+static void output_lat_thread(struct perf_sched *sched, struct work_atoms *work_list)
{
int i;
int ret;
@@ -1214,8 +1113,8 @@ static void output_lat_thread(struct work_atoms *work_list)
if (!strcmp(work_list->thread->comm, "swapper"))
return;
- all_runtime += work_list->total_runtime;
- all_count += work_list->nb_atoms;
+ sched->all_runtime += work_list->total_runtime;
+ sched->all_count += work_list->nb_atoms;
ret = printf(" %s:%d ", work_list->thread->comm, work_list->thread->pid);
@@ -1241,11 +1140,6 @@ static int pid_cmp(struct work_atoms *l, struct work_atoms *r)
return 0;
}
-static struct sort_dimension pid_sort_dimension = {
- .name = "pid",
- .cmp = pid_cmp,
-};
-
static int avg_cmp(struct work_atoms *l, struct work_atoms *r)
{
u64 avgl, avgr;
@@ -1267,11 +1161,6 @@ static int avg_cmp(struct work_atoms *l, struct work_atoms *r)
return 0;
}
-static struct sort_dimension avg_sort_dimension = {
- .name = "avg",
- .cmp = avg_cmp,
-};
-
static int max_cmp(struct work_atoms *l, struct work_atoms *r)
{
if (l->max_lat < r->max_lat)
@@ -1282,11 +1171,6 @@ static int max_cmp(struct work_atoms *l, struct work_atoms *r)
return 0;
}
-static struct sort_dimension max_sort_dimension = {
- .name = "max",
- .cmp = max_cmp,
-};
-
static int switch_cmp(struct work_atoms *l, struct work_atoms *r)
{
if (l->nb_atoms < r->nb_atoms)
@@ -1297,11 +1181,6 @@ static int switch_cmp(struct work_atoms *l, struct work_atoms *r)
return 0;
}
-static struct sort_dimension switch_sort_dimension = {
- .name = "switch",
- .cmp = switch_cmp,
-};
-
static int runtime_cmp(struct work_atoms *l, struct work_atoms *r)
{
if (l->total_runtime < r->total_runtime)
@@ -1312,28 +1191,38 @@ static int runtime_cmp(struct work_atoms *l, struct work_atoms *r)
return 0;
}
-static struct sort_dimension runtime_sort_dimension = {
- .name = "runtime",
- .cmp = runtime_cmp,
-};
-
-static struct sort_dimension *available_sorts[] = {
- &pid_sort_dimension,
- &avg_sort_dimension,
- &max_sort_dimension,
- &switch_sort_dimension,
- &runtime_sort_dimension,
-};
-
-#define NB_AVAILABLE_SORTS (int)(sizeof(available_sorts) / sizeof(struct sort_dimension *))
-
-static LIST_HEAD(sort_list);
-
static int sort_dimension__add(const char *tok, struct list_head *list)
{
- int i;
+ size_t i;
+ static struct sort_dimension avg_sort_dimension = {
+ .name = "avg",
+ .cmp = avg_cmp,
+ };
+ static struct sort_dimension max_sort_dimension = {
+ .name = "max",
+ .cmp = max_cmp,
+ };
+ static struct sort_dimension pid_sort_dimension = {
+ .name = "pid",
+ .cmp = pid_cmp,
+ };
+ static struct sort_dimension runtime_sort_dimension = {
+ .name = "runtime",
+ .cmp = runtime_cmp,
+ };
+ static struct sort_dimension switch_sort_dimension = {
+ .name = "switch",
+ .cmp = switch_cmp,
+ };
+ struct sort_dimension *available_sorts[] = {
+ &pid_sort_dimension,
+ &avg_sort_dimension,
+ &max_sort_dimension,
+ &switch_sort_dimension,
+ &runtime_sort_dimension,
+ };
- for (i = 0; i < NB_AVAILABLE_SORTS; i++) {
+ for (i = 0; i < ARRAY_SIZE(available_sorts); i++) {
if (!strcmp(available_sorts[i]->name, tok)) {
list_add_tail(&available_sorts[i]->list, list);
@@ -1344,126 +1233,97 @@ static int sort_dimension__add(const char *tok, struct list_head *list)
return -1;
}
-static void setup_sorting(void);
-
-static void sort_lat(void)
+static void perf_sched__sort_lat(struct perf_sched *sched)
{
struct rb_node *node;
for (;;) {
struct work_atoms *data;
- node = rb_first(&atom_root);
+ node = rb_first(&sched->atom_root);
if (!node)
break;
- rb_erase(node, &atom_root);
+ rb_erase(node, &sched->atom_root);
data = rb_entry(node, struct work_atoms, node);
- __thread_latency_insert(&sorted_atom_root, data, &sort_list);
+ __thread_latency_insert(&sched->sorted_atom_root, data, &sched->sort_list);
}
}
-static struct trace_sched_handler *trace_handler;
-
-static void
-process_sched_wakeup_event(struct perf_tool *tool __used,
- struct event_format *event,
- struct perf_sample *sample,
- struct machine *machine,
- struct thread *thread)
+static int process_sched_wakeup_event(struct perf_tool *tool,
+ struct perf_evsel *evsel,
+ struct perf_sample *sample,
+ struct machine *machine)
{
- void *data = sample->raw_data;
- struct trace_wakeup_event wakeup_event;
-
- FILL_COMMON_FIELDS(wakeup_event, event, data);
+ struct perf_sched *sched = container_of(tool, struct perf_sched, tool);
- FILL_ARRAY(wakeup_event, comm, event, data);
- FILL_FIELD(wakeup_event, pid, event, data);
- FILL_FIELD(wakeup_event, prio, event, data);
- FILL_FIELD(wakeup_event, success, event, data);
- FILL_FIELD(wakeup_event, cpu, event, data);
+ if (sched->tp_handler->wakeup_event)
+ return sched->tp_handler->wakeup_event(sched, evsel, sample, machine);
- if (trace_handler->wakeup_event)
- trace_handler->wakeup_event(&wakeup_event, machine, event,
- sample->cpu, sample->time, thread);
+ return 0;
}
-/*
- * Track the current task - that way we can know whether there's any
- * weird events, such as a task being switched away that is not current.
- */
-static int max_cpu;
-
-static u32 curr_pid[MAX_CPUS] = { [0 ... MAX_CPUS-1] = -1 };
-
-static struct thread *curr_thread[MAX_CPUS];
-
-static char next_shortname1 = 'A';
-static char next_shortname2 = '0';
-
-static void
-map_switch_event(struct trace_switch_event *switch_event,
- struct machine *machine,
- struct event_format *event __used,
- int this_cpu,
- u64 timestamp,
- struct thread *thread __used)
+static int map_switch_event(struct perf_sched *sched, struct perf_evsel *evsel,
+ struct perf_sample *sample, struct machine *machine)
{
- struct thread *sched_out __used, *sched_in;
+ const u32 prev_pid = perf_evsel__intval(evsel, sample, "prev_pid"),
+ next_pid = perf_evsel__intval(evsel, sample, "next_pid");
+ struct thread *sched_out __maybe_unused, *sched_in;
int new_shortname;
- u64 timestamp0;
+ u64 timestamp0, timestamp = sample->time;
s64 delta;
- int cpu;
+ int cpu, this_cpu = sample->cpu;
BUG_ON(this_cpu >= MAX_CPUS || this_cpu < 0);
- if (this_cpu > max_cpu)
- max_cpu = this_cpu;
+ if (this_cpu > sched->max_cpu)
+ sched->max_cpu = this_cpu;
- timestamp0 = cpu_last_switched[this_cpu];
- cpu_last_switched[this_cpu] = timestamp;
+ timestamp0 = sched->cpu_last_switched[this_cpu];
+ sched->cpu_last_switched[this_cpu] = timestamp;
if (timestamp0)
delta = timestamp - timestamp0;
else
delta = 0;
- if (delta < 0)
- die("hm, delta: %" PRIu64 " < 0 ?\n", delta);
-
+ if (delta < 0) {
+ pr_err("hm, delta: %" PRIu64 " < 0 ?\n", delta);
+ return -1;
+ }
- sched_out = machine__findnew_thread(machine, switch_event->prev_pid);
- sched_in = machine__findnew_thread(machine, switch_event->next_pid);
+ sched_out = machine__findnew_thread(machine, prev_pid);
+ sched_in = machine__findnew_thread(machine, next_pid);
- curr_thread[this_cpu] = sched_in;
+ sched->curr_thread[this_cpu] = sched_in;
printf(" ");
new_shortname = 0;
if (!sched_in->shortname[0]) {
- sched_in->shortname[0] = next_shortname1;
- sched_in->shortname[1] = next_shortname2;
+ sched_in->shortname[0] = sched->next_shortname1;
+ sched_in->shortname[1] = sched->next_shortname2;
- if (next_shortname1 < 'Z') {
- next_shortname1++;
+ if (sched->next_shortname1 < 'Z') {
+ sched->next_shortname1++;
} else {
- next_shortname1='A';
- if (next_shortname2 < '9') {
- next_shortname2++;
+ sched->next_shortname1 = 'A';
+ if (sched->next_shortname2 < '9') {
+ sched->next_shortname2++;
} else {
- next_shortname2='0';
+ sched->next_shortname2 = '0';
}
}
new_shortname = 1;
}
- for (cpu = 0; cpu <= max_cpu; cpu++) {
+ for (cpu = 0; cpu <= sched->max_cpu; cpu++) {
if (cpu != this_cpu)
printf(" ");
else
printf("*");
- if (curr_thread[cpu]) {
- if (curr_thread[cpu]->pid)
- printf("%2s ", curr_thread[cpu]->shortname);
+ if (sched->curr_thread[cpu]) {
+ if (sched->curr_thread[cpu]->pid)
+ printf("%2s ", sched->curr_thread[cpu]->shortname);
else
printf(". ");
} else
@@ -1477,134 +1337,97 @@ map_switch_event(struct trace_switch_event *switch_event,
} else {
printf("\n");
}
+
+ return 0;
}
-static void
-process_sched_switch_event(struct perf_tool *tool __used,
- struct event_format *event,
- struct perf_sample *sample,
- struct machine *machine,
- struct thread *thread)
+static int process_sched_switch_event(struct perf_tool *tool,
+ struct perf_evsel *evsel,
+ struct perf_sample *sample,
+ struct machine *machine)
{
- int this_cpu = sample->cpu;
- void *data = sample->raw_data;
- struct trace_switch_event switch_event;
-
- FILL_COMMON_FIELDS(switch_event, event, data);
-
- FILL_ARRAY(switch_event, prev_comm, event, data);
- FILL_FIELD(switch_event, prev_pid, event, data);
- FILL_FIELD(switch_event, prev_prio, event, data);
- FILL_FIELD(switch_event, prev_state, event, data);
- FILL_ARRAY(switch_event, next_comm, event, data);
- FILL_FIELD(switch_event, next_pid, event, data);
- FILL_FIELD(switch_event, next_prio, event, data);
+ struct perf_sched *sched = container_of(tool, struct perf_sched, tool);
+ int this_cpu = sample->cpu, err = 0;
+ u32 prev_pid = perf_evsel__intval(evsel, sample, "prev_pid"),
+ next_pid = perf_evsel__intval(evsel, sample, "next_pid");
- if (curr_pid[this_cpu] != (u32)-1) {
+ if (sched->curr_pid[this_cpu] != (u32)-1) {
/*
* Are we trying to switch away a PID that is
* not current?
*/
- if (curr_pid[this_cpu] != switch_event.prev_pid)
- nr_context_switch_bugs++;
+ if (sched->curr_pid[this_cpu] != prev_pid)
+ sched->nr_context_switch_bugs++;
}
- if (trace_handler->switch_event)
- trace_handler->switch_event(&switch_event, machine, event,
- this_cpu, sample->time, thread);
- curr_pid[this_cpu] = switch_event.next_pid;
+ if (sched->tp_handler->switch_event)
+ err = sched->tp_handler->switch_event(sched, evsel, sample, machine);
+
+ sched->curr_pid[this_cpu] = next_pid;
+ return err;
}
-static void
-process_sched_runtime_event(struct perf_tool *tool __used,
- struct event_format *event,
- struct perf_sample *sample,
- struct machine *machine,
- struct thread *thread)
+static int process_sched_runtime_event(struct perf_tool *tool,
+ struct perf_evsel *evsel,
+ struct perf_sample *sample,
+ struct machine *machine)
{
- void *data = sample->raw_data;
- struct trace_runtime_event runtime_event;
+ struct perf_sched *sched = container_of(tool, struct perf_sched, tool);
- FILL_ARRAY(runtime_event, comm, event, data);
- FILL_FIELD(runtime_event, pid, event, data);
- FILL_FIELD(runtime_event, runtime, event, data);
- FILL_FIELD(runtime_event, vruntime, event, data);
+ if (sched->tp_handler->runtime_event)
+ return sched->tp_handler->runtime_event(sched, evsel, sample, machine);
- if (trace_handler->runtime_event)
- trace_handler->runtime_event(&runtime_event, machine, event,
- sample->cpu, sample->time, thread);
+ return 0;
}
-static void
-process_sched_fork_event(struct perf_tool *tool __used,
- struct event_format *event,
- struct perf_sample *sample,
- struct machine *machine __used,
- struct thread *thread)
+static int process_sched_fork_event(struct perf_tool *tool,
+ struct perf_evsel *evsel,
+ struct perf_sample *sample,
+ struct machine *machine __maybe_unused)
{
- void *data = sample->raw_data;
- struct trace_fork_event fork_event;
-
- FILL_COMMON_FIELDS(fork_event, event, data);
+ struct perf_sched *sched = container_of(tool, struct perf_sched, tool);
- FILL_ARRAY(fork_event, parent_comm, event, data);
- FILL_FIELD(fork_event, parent_pid, event, data);
- FILL_ARRAY(fork_event, child_comm, event, data);
- FILL_FIELD(fork_event, child_pid, event, data);
+ if (sched->tp_handler->fork_event)
+ return sched->tp_handler->fork_event(sched, evsel, sample);
- if (trace_handler->fork_event)
- trace_handler->fork_event(&fork_event, event,
- sample->cpu, sample->time, thread);
+ return 0;
}
-static void
-process_sched_exit_event(struct perf_tool *tool __used,
- struct event_format *event,
- struct perf_sample *sample __used,
- struct machine *machine __used,
- struct thread *thread __used)
+static int process_sched_exit_event(struct perf_tool *tool __maybe_unused,
+ struct perf_evsel *evsel,
+ struct perf_sample *sample __maybe_unused,
+ struct machine *machine __maybe_unused)
{
- if (verbose)
- printf("sched_exit event %p\n", event);
+ pr_debug("sched_exit event %p\n", evsel);
+ return 0;
}
-static void
-process_sched_migrate_task_event(struct perf_tool *tool __used,
- struct event_format *event,
- struct perf_sample *sample,
- struct machine *machine,
- struct thread *thread)
+static int process_sched_migrate_task_event(struct perf_tool *tool,
+ struct perf_evsel *evsel,
+ struct perf_sample *sample,
+ struct machine *machine)
{
- void *data = sample->raw_data;
- struct trace_migrate_task_event migrate_task_event;
-
- FILL_COMMON_FIELDS(migrate_task_event, event, data);
+ struct perf_sched *sched = container_of(tool, struct perf_sched, tool);
- FILL_ARRAY(migrate_task_event, comm, event, data);
- FILL_FIELD(migrate_task_event, pid, event, data);
- FILL_FIELD(migrate_task_event, prio, event, data);
- FILL_FIELD(migrate_task_event, cpu, event, data);
+ if (sched->tp_handler->migrate_task_event)
+ return sched->tp_handler->migrate_task_event(sched, evsel, sample, machine);
- if (trace_handler->migrate_task_event)
- trace_handler->migrate_task_event(&migrate_task_event, machine,
- event, sample->cpu,
- sample->time, thread);
+ return 0;
}
-typedef void (*tracepoint_handler)(struct perf_tool *tool, struct event_format *event,
- struct perf_sample *sample,
- struct machine *machine,
- struct thread *thread);
+typedef int (*tracepoint_handler)(struct perf_tool *tool,
+ struct perf_evsel *evsel,
+ struct perf_sample *sample,
+ struct machine *machine);
-static int perf_sched__process_tracepoint_sample(struct perf_tool *tool,
- union perf_event *event __used,
+static int perf_sched__process_tracepoint_sample(struct perf_tool *tool __maybe_unused,
+ union perf_event *event __maybe_unused,
struct perf_sample *sample,
struct perf_evsel *evsel,
struct machine *machine)
{
- struct perf_sched *sched = container_of(tool, struct perf_sched, tool);
- struct pevent *pevent = sched->session->pevent;
struct thread *thread = machine__findnew_thread(machine, sample->pid);
+ int err = 0;
if (thread == NULL) {
pr_debug("problem processing %s event, skipping it.\n",
@@ -1617,30 +1440,15 @@ static int perf_sched__process_tracepoint_sample(struct perf_tool *tool,
if (evsel->handler.func != NULL) {
tracepoint_handler f = evsel->handler.func;
-
- if (evsel->handler.data == NULL)
- evsel->handler.data = pevent_find_event(pevent,
- evsel->attr.config);
-
- f(tool, evsel->handler.data, sample, machine, thread);
+ err = f(tool, evsel, sample, machine);
}
- return 0;
+ return err;
}
-static struct perf_sched perf_sched = {
- .tool = {
- .sample = perf_sched__process_tracepoint_sample,
- .comm = perf_event__process_comm,
- .lost = perf_event__process_lost,
- .fork = perf_event__process_task,
- .ordered_samples = true,
- },
-};
-
-static void read_events(bool destroy, struct perf_session **psession)
+static int perf_sched__read_events(struct perf_sched *sched, bool destroy,
+ struct perf_session **psession)
{
- int err = -EINVAL;
const struct perf_evsel_str_handler handlers[] = {
{ "sched:sched_switch", process_sched_switch_event, },
{ "sched:sched_stat_runtime", process_sched_runtime_event, },
@@ -1652,24 +1460,25 @@ static void read_events(bool destroy, struct perf_session **psession)
};
struct perf_session *session;
- session = perf_session__new(input_name, O_RDONLY, 0, false,
- &perf_sched.tool);
- if (session == NULL)
- die("No Memory");
-
- perf_sched.session = session;
+ session = perf_session__new(sched->input_name, O_RDONLY, 0, false, &sched->tool);
+ if (session == NULL) {
+ pr_debug("No Memory for session\n");
+ return -1;
+ }
- err = perf_session__set_tracepoints_handlers(session, handlers);
- assert(err == 0);
+ if (perf_session__set_tracepoints_handlers(session, handlers))
+ goto out_delete;
if (perf_session__has_traces(session, "record -R")) {
- err = perf_session__process_events(session, &perf_sched.tool);
- if (err)
- die("Failed to process events, error %d", err);
+ int err = perf_session__process_events(session, &sched->tool);
+ if (err) {
+ pr_err("Failed to process events, error %d", err);
+ goto out_delete;
+ }
- nr_events = session->hists.stats.nr_events[0];
- nr_lost_events = session->hists.stats.total_lost;
- nr_lost_chunks = session->hists.stats.nr_events[PERF_RECORD_LOST];
+ sched->nr_events = session->hists.stats.nr_events[0];
+ sched->nr_lost_events = session->hists.stats.total_lost;
+ sched->nr_lost_chunks = session->hists.stats.nr_events[PERF_RECORD_LOST];
}
if (destroy)
@@ -1677,208 +1486,166 @@ static void read_events(bool destroy, struct perf_session **psession)
if (psession)
*psession = session;
+
+ return 0;
+
+out_delete:
+ perf_session__delete(session);
+ return -1;
}
-static void print_bad_events(void)
+static void print_bad_events(struct perf_sched *sched)
{
- if (nr_unordered_timestamps && nr_timestamps) {
+ if (sched->nr_unordered_timestamps && sched->nr_timestamps) {
printf(" INFO: %.3f%% unordered timestamps (%ld out of %ld)\n",
- (double)nr_unordered_timestamps/(double)nr_timestamps*100.0,
- nr_unordered_timestamps, nr_timestamps);
+ (double)sched->nr_unordered_timestamps / (double)sched->nr_timestamps * 100.0,
+ sched->nr_unordered_timestamps, sched->nr_timestamps);
}
- if (nr_lost_events && nr_events) {
+ if (sched->nr_lost_events && sched->nr_events) {
printf(" INFO: %.3f%% lost events (%ld out of %ld, in %ld chunks)\n",
- (double)nr_lost_events/(double)nr_events*100.0,
- nr_lost_events, nr_events, nr_lost_chunks);
+ (double)sched->nr_lost_events / (double)sched->nr_events * 100.0,
+ sched->nr_lost_events, sched->nr_events, sched->nr_lost_chunks);
}
- if (nr_state_machine_bugs && nr_timestamps) {
+ if (sched->nr_state_machine_bugs && sched->nr_timestamps) {
printf(" INFO: %.3f%% state machine bugs (%ld out of %ld)",
- (double)nr_state_machine_bugs/(double)nr_timestamps*100.0,
- nr_state_machine_bugs, nr_timestamps);
- if (nr_lost_events)
+ (double)sched->nr_state_machine_bugs / (double)sched->nr_timestamps * 100.0,
+ sched->nr_state_machine_bugs, sched->nr_timestamps);
+ if (sched->nr_lost_events)
printf(" (due to lost events?)");
printf("\n");
}
- if (nr_context_switch_bugs && nr_timestamps) {
+ if (sched->nr_context_switch_bugs && sched->nr_timestamps) {
printf(" INFO: %.3f%% context switch bugs (%ld out of %ld)",
- (double)nr_context_switch_bugs/(double)nr_timestamps*100.0,
- nr_context_switch_bugs, nr_timestamps);
- if (nr_lost_events)
+ (double)sched->nr_context_switch_bugs / (double)sched->nr_timestamps * 100.0,
+ sched->nr_context_switch_bugs, sched->nr_timestamps);
+ if (sched->nr_lost_events)
printf(" (due to lost events?)");
printf("\n");
}
}
-static void __cmd_lat(void)
+static int perf_sched__lat(struct perf_sched *sched)
{
struct rb_node *next;
struct perf_session *session;
setup_pager();
- read_events(false, &session);
- sort_lat();
+ if (perf_sched__read_events(sched, false, &session))
+ return -1;
+ perf_sched__sort_lat(sched);
printf("\n ---------------------------------------------------------------------------------------------------------------\n");
printf(" Task | Runtime ms | Switches | Average delay ms | Maximum delay ms | Maximum delay at |\n");
printf(" ---------------------------------------------------------------------------------------------------------------\n");
- next = rb_first(&sorted_atom_root);
+ next = rb_first(&sched->sorted_atom_root);
while (next) {
struct work_atoms *work_list;
work_list = rb_entry(next, struct work_atoms, node);
- output_lat_thread(work_list);
+ output_lat_thread(sched, work_list);
next = rb_next(next);
}
printf(" -----------------------------------------------------------------------------------------\n");
printf(" TOTAL: |%11.3f ms |%9" PRIu64 " |\n",
- (double)all_runtime/1e6, all_count);
+ (double)sched->all_runtime / 1e6, sched->all_count);
printf(" ---------------------------------------------------\n");
- print_bad_events();
+ print_bad_events(sched);
printf("\n");
perf_session__delete(session);
+ return 0;
}
-static struct trace_sched_handler map_ops = {
- .wakeup_event = NULL,
- .switch_event = map_switch_event,
- .runtime_event = NULL,
- .fork_event = NULL,
-};
-
-static void __cmd_map(void)
+static int perf_sched__map(struct perf_sched *sched)
{
- max_cpu = sysconf(_SC_NPROCESSORS_CONF);
+ sched->max_cpu = sysconf(_SC_NPROCESSORS_CONF);
setup_pager();
- read_events(true, NULL);
- print_bad_events();
+ if (perf_sched__read_events(sched, true, NULL))
+ return -1;
+ print_bad_events(sched);
+ return 0;
}
-static void __cmd_replay(void)
+static int perf_sched__replay(struct perf_sched *sched)
{
unsigned long i;
- calibrate_run_measurement_overhead();
- calibrate_sleep_measurement_overhead();
+ calibrate_run_measurement_overhead(sched);
+ calibrate_sleep_measurement_overhead(sched);
- test_calibrations();
+ test_calibrations(sched);
- read_events(true, NULL);
+ if (perf_sched__read_events(sched, true, NULL))
+ return -1;
- printf("nr_run_events: %ld\n", nr_run_events);
- printf("nr_sleep_events: %ld\n", nr_sleep_events);
- printf("nr_wakeup_events: %ld\n", nr_wakeup_events);
+ printf("nr_run_events: %ld\n", sched->nr_run_events);
+ printf("nr_sleep_events: %ld\n", sched->nr_sleep_events);
+ printf("nr_wakeup_events: %ld\n", sched->nr_wakeup_events);
- if (targetless_wakeups)
- printf("target-less wakeups: %ld\n", targetless_wakeups);
- if (multitarget_wakeups)
- printf("multi-target wakeups: %ld\n", multitarget_wakeups);
- if (nr_run_events_optimized)
+ if (sched->targetless_wakeups)
+ printf("target-less wakeups: %ld\n", sched->targetless_wakeups);
+ if (sched->multitarget_wakeups)
+ printf("multi-target wakeups: %ld\n", sched->multitarget_wakeups);
+ if (sched->nr_run_events_optimized)
printf("run atoms optimized: %ld\n",
- nr_run_events_optimized);
+ sched->nr_run_events_optimized);
- print_task_traces();
- add_cross_task_wakeups();
+ print_task_traces(sched);
+ add_cross_task_wakeups(sched);
- create_tasks();
+ create_tasks(sched);
printf("------------------------------------------------------------\n");
- for (i = 0; i < replay_repeat; i++)
- run_one_test();
-}
-
-
-static const char * const sched_usage[] = {
- "perf sched [<options>] {record|latency|map|replay|script}",
- NULL
-};
-
-static const struct option sched_options[] = {
- OPT_STRING('i', "input", &input_name, "file",
- "input file name"),
- OPT_INCR('v', "verbose", &verbose,
- "be more verbose (show symbol address, etc)"),
- OPT_BOOLEAN('D', "dump-raw-trace", &dump_trace,
- "dump raw trace in ASCII"),
- OPT_END()
-};
-
-static const char * const latency_usage[] = {
- "perf sched latency [<options>]",
- NULL
-};
-
-static const struct option latency_options[] = {
- OPT_STRING('s', "sort", &sort_order, "key[,key2...]",
- "sort by key(s): runtime, switch, avg, max"),
- OPT_INCR('v', "verbose", &verbose,
- "be more verbose (show symbol address, etc)"),
- OPT_INTEGER('C', "CPU", &profile_cpu,
- "CPU to profile on"),
- OPT_BOOLEAN('D', "dump-raw-trace", &dump_trace,
- "dump raw trace in ASCII"),
- OPT_END()
-};
-
-static const char * const replay_usage[] = {
- "perf sched replay [<options>]",
- NULL
-};
+ for (i = 0; i < sched->replay_repeat; i++)
+ run_one_test(sched);
-static const struct option replay_options[] = {
- OPT_UINTEGER('r', "repeat", &replay_repeat,
- "repeat the workload replay N times (-1: infinite)"),
- OPT_INCR('v', "verbose", &verbose,
- "be more verbose (show symbol address, etc)"),
- OPT_BOOLEAN('D', "dump-raw-trace", &dump_trace,
- "dump raw trace in ASCII"),
- OPT_END()
-};
+ return 0;
+}
-static void setup_sorting(void)
+static void setup_sorting(struct perf_sched *sched, const struct option *options,
+ const char * const usage_msg[])
{
- char *tmp, *tok, *str = strdup(sort_order);
+ char *tmp, *tok, *str = strdup(sched->sort_order);
for (tok = strtok_r(str, ", ", &tmp);
tok; tok = strtok_r(NULL, ", ", &tmp)) {
- if (sort_dimension__add(tok, &sort_list) < 0) {
+ if (sort_dimension__add(tok, &sched->sort_list) < 0) {
error("Unknown --sort key: `%s'", tok);
- usage_with_options(latency_usage, latency_options);
+ usage_with_options(usage_msg, options);
}
}
free(str);
- sort_dimension__add("pid", &cmp_pid);
+ sort_dimension__add("pid", &sched->cmp_pid);
}
-static const char *record_args[] = {
- "record",
- "-a",
- "-R",
- "-f",
- "-m", "1024",
- "-c", "1",
- "-e", "sched:sched_switch",
- "-e", "sched:sched_stat_wait",
- "-e", "sched:sched_stat_sleep",
- "-e", "sched:sched_stat_iowait",
- "-e", "sched:sched_stat_runtime",
- "-e", "sched:sched_process_exit",
- "-e", "sched:sched_process_fork",
- "-e", "sched:sched_wakeup",
- "-e", "sched:sched_migrate_task",
-};
-
static int __cmd_record(int argc, const char **argv)
{
unsigned int rec_argc, i, j;
const char **rec_argv;
+ const char * const record_args[] = {
+ "record",
+ "-a",
+ "-R",
+ "-f",
+ "-m", "1024",
+ "-c", "1",
+ "-e", "sched:sched_switch",
+ "-e", "sched:sched_stat_wait",
+ "-e", "sched:sched_stat_sleep",
+ "-e", "sched:sched_stat_iowait",
+ "-e", "sched:sched_stat_runtime",
+ "-e", "sched:sched_process_exit",
+ "-e", "sched:sched_process_fork",
+ "-e", "sched:sched_wakeup",
+ "-e", "sched:sched_migrate_task",
+ };
rec_argc = ARRAY_SIZE(record_args) + argc - 1;
rec_argv = calloc(rec_argc + 1, sizeof(char *));
@@ -1897,8 +1664,85 @@ static int __cmd_record(int argc, const char **argv)
return cmd_record(i, rec_argv, NULL);
}
-int cmd_sched(int argc, const char **argv, const char *prefix __used)
+int cmd_sched(int argc, const char **argv, const char *prefix __maybe_unused)
{
+ const char default_sort_order[] = "avg, max, switch, runtime";
+ struct perf_sched sched = {
+ .tool = {
+ .sample = perf_sched__process_tracepoint_sample,
+ .comm = perf_event__process_comm,
+ .lost = perf_event__process_lost,
+ .fork = perf_event__process_task,
+ .ordered_samples = true,
+ },
+ .cmp_pid = LIST_HEAD_INIT(sched.cmp_pid),
+ .sort_list = LIST_HEAD_INIT(sched.sort_list),
+ .start_work_mutex = PTHREAD_MUTEX_INITIALIZER,
+ .work_done_wait_mutex = PTHREAD_MUTEX_INITIALIZER,
+ .curr_pid = { [0 ... MAX_CPUS - 1] = -1 },
+ .sort_order = default_sort_order,
+ .replay_repeat = 10,
+ .profile_cpu = -1,
+ .next_shortname1 = 'A',
+ .next_shortname2 = '0',
+ };
+ const struct option latency_options[] = {
+ OPT_STRING('s', "sort", &sched.sort_order, "key[,key2...]",
+ "sort by key(s): runtime, switch, avg, max"),
+ OPT_INCR('v', "verbose", &verbose,
+ "be more verbose (show symbol address, etc)"),
+ OPT_INTEGER('C', "CPU", &sched.profile_cpu,
+ "CPU to profile on"),
+ OPT_BOOLEAN('D', "dump-raw-trace", &dump_trace,
+ "dump raw trace in ASCII"),
+ OPT_END()
+ };
+ const struct option replay_options[] = {
+ OPT_UINTEGER('r', "repeat", &sched.replay_repeat,
+ "repeat the workload replay N times (-1: infinite)"),
+ OPT_INCR('v', "verbose", &verbose,
+ "be more verbose (show symbol address, etc)"),
+ OPT_BOOLEAN('D', "dump-raw-trace", &dump_trace,
+ "dump raw trace in ASCII"),
+ OPT_END()
+ };
+ const struct option sched_options[] = {
+ OPT_STRING('i', "input", &sched.input_name, "file",
+ "input file name"),
+ OPT_INCR('v', "verbose", &verbose,
+ "be more verbose (show symbol address, etc)"),
+ OPT_BOOLEAN('D', "dump-raw-trace", &dump_trace,
+ "dump raw trace in ASCII"),
+ OPT_END()
+ };
+ const char * const latency_usage[] = {
+ "perf sched latency [<options>]",
+ NULL
+ };
+ const char * const replay_usage[] = {
+ "perf sched replay [<options>]",
+ NULL
+ };
+ const char * const sched_usage[] = {
+ "perf sched [<options>] {record|latency|map|replay|script}",
+ NULL
+ };
+ struct trace_sched_handler lat_ops = {
+ .wakeup_event = latency_wakeup_event,
+ .switch_event = latency_switch_event,
+ .runtime_event = latency_runtime_event,
+ .fork_event = latency_fork_event,
+ .migrate_task_event = latency_migrate_task_event,
+ };
+ struct trace_sched_handler map_ops = {
+ .switch_event = map_switch_event,
+ };
+ struct trace_sched_handler replay_ops = {
+ .wakeup_event = replay_wakeup_event,
+ .switch_event = replay_switch_event,
+ .fork_event = replay_fork_event,
+ };
+
argc = parse_options(argc, argv, sched_options, sched_usage,
PARSE_OPT_STOP_AT_NON_OPTION);
if (!argc)
@@ -1914,26 +1758,26 @@ int cmd_sched(int argc, const char **argv, const char *prefix __used)
if (!strncmp(argv[0], "rec", 3)) {
return __cmd_record(argc, argv);
} else if (!strncmp(argv[0], "lat", 3)) {
- trace_handler = &lat_ops;
+ sched.tp_handler = &lat_ops;
if (argc > 1) {
argc = parse_options(argc, argv, latency_options, latency_usage, 0);
if (argc)
usage_with_options(latency_usage, latency_options);
}
- setup_sorting();
- __cmd_lat();
+ setup_sorting(&sched, latency_options, latency_usage);
+ return perf_sched__lat(&sched);
} else if (!strcmp(argv[0], "map")) {
- trace_handler = &map_ops;
- setup_sorting();
- __cmd_map();
+ sched.tp_handler = &map_ops;
+ setup_sorting(&sched, latency_options, latency_usage);
+ return perf_sched__map(&sched);
} else if (!strncmp(argv[0], "rep", 3)) {
- trace_handler = &replay_ops;
+ sched.tp_handler = &replay_ops;
if (argc) {
argc = parse_options(argc, argv, replay_options, replay_usage, 0);
if (argc)
usage_with_options(replay_usage, replay_options);
}
- __cmd_replay();
+ return perf_sched__replay(&sched);
} else {
usage_with_options(sched_usage, sched_options);
}
diff --git a/tools/perf/builtin-script.c b/tools/perf/builtin-script.c
index 1e60ab70b2b..1be843aa154 100644
--- a/tools/perf/builtin-script.c
+++ b/tools/perf/builtin-script.c
@@ -14,6 +14,7 @@
#include "util/util.h"
#include "util/evlist.h"
#include "util/evsel.h"
+#include "util/sort.h"
#include <linux/bitmap.h>
static char const *script_name;
@@ -28,11 +29,6 @@ static bool system_wide;
static const char *cpu_list;
static DECLARE_BITMAP(cpu_bitmap, MAX_NR_CPUS);
-struct perf_script {
- struct perf_tool tool;
- struct perf_session *session;
-};
-
enum perf_output_field {
PERF_OUTPUT_COMM = 1U << 0,
PERF_OUTPUT_TID = 1U << 1,
@@ -262,14 +258,11 @@ static int perf_session__check_output_opt(struct perf_session *session)
return 0;
}
-static void print_sample_start(struct pevent *pevent,
- struct perf_sample *sample,
+static void print_sample_start(struct perf_sample *sample,
struct thread *thread,
struct perf_evsel *evsel)
{
- int type;
struct perf_event_attr *attr = &evsel->attr;
- struct event_format *event;
const char *evname = NULL;
unsigned long secs;
unsigned long usecs;
@@ -307,20 +300,7 @@ static void print_sample_start(struct pevent *pevent,
}
if (PRINT_FIELD(EVNAME)) {
- if (attr->type == PERF_TYPE_TRACEPOINT) {
- /*
- * XXX Do we really need this here?
- * perf_evlist__set_tracepoint_names should have done
- * this already
- */
- type = trace_parse_common_type(pevent,
- sample->raw_data);
- event = pevent_find_event(pevent, type);
- if (event)
- evname = event->name;
- } else
- evname = perf_evsel__name(evsel);
-
+ evname = perf_evsel__name(evsel);
printf("%s: ", evname ? evname : "[unknown]");
}
}
@@ -401,7 +381,7 @@ static void print_sample_bts(union perf_event *event,
printf(" ");
else
printf("\n");
- perf_event__print_ip(event, sample, machine,
+ perf_evsel__print_ip(evsel, event, sample, machine,
PRINT_FIELD(SYM), PRINT_FIELD(DSO),
PRINT_FIELD(SYMOFFSET));
}
@@ -415,19 +395,17 @@ static void print_sample_bts(union perf_event *event,
printf("\n");
}
-static void process_event(union perf_event *event __unused,
- struct pevent *pevent,
- struct perf_sample *sample,
- struct perf_evsel *evsel,
- struct machine *machine,
- struct thread *thread)
+static void process_event(union perf_event *event, struct perf_sample *sample,
+ struct perf_evsel *evsel, struct machine *machine,
+ struct addr_location *al)
{
struct perf_event_attr *attr = &evsel->attr;
+ struct thread *thread = al->thread;
if (output[attr->type].fields == 0)
return;
- print_sample_start(pevent, sample, thread, evsel);
+ print_sample_start(sample, thread, evsel);
if (is_bts_event(attr)) {
print_sample_bts(event, sample, evsel, machine, thread);
@@ -435,9 +413,8 @@ static void process_event(union perf_event *event __unused,
}
if (PRINT_FIELD(TRACE))
- print_trace_event(pevent, sample->cpu, sample->raw_data,
- sample->raw_size);
-
+ event_format__print(evsel->tp_format, sample->cpu,
+ sample->raw_data, sample->raw_size);
if (PRINT_FIELD(ADDR))
print_sample_addr(event, sample, machine, thread, attr);
@@ -446,7 +423,7 @@ static void process_event(union perf_event *event __unused,
printf(" ");
else
printf("\n");
- perf_event__print_ip(event, sample, machine,
+ perf_evsel__print_ip(evsel, event, sample, machine,
PRINT_FIELD(SYM), PRINT_FIELD(DSO),
PRINT_FIELD(SYMOFFSET));
}
@@ -454,9 +431,9 @@ static void process_event(union perf_event *event __unused,
printf("\n");
}
-static int default_start_script(const char *script __unused,
- int argc __unused,
- const char **argv __unused)
+static int default_start_script(const char *script __maybe_unused,
+ int argc __maybe_unused,
+ const char **argv __maybe_unused)
{
return 0;
}
@@ -466,8 +443,8 @@ static int default_stop_script(void)
return 0;
}
-static int default_generate_script(struct pevent *pevent __unused,
- const char *outfile __unused)
+static int default_generate_script(struct pevent *pevent __maybe_unused,
+ const char *outfile __maybe_unused)
{
return 0;
}
@@ -498,14 +475,13 @@ static int cleanup_scripting(void)
static const char *input_name;
-static int process_sample_event(struct perf_tool *tool __used,
+static int process_sample_event(struct perf_tool *tool __maybe_unused,
union perf_event *event,
struct perf_sample *sample,
struct perf_evsel *evsel,
struct machine *machine)
{
struct addr_location al;
- struct perf_script *scr = container_of(tool, struct perf_script, tool);
struct thread *thread = machine__findnew_thread(machine, event->ip.tid);
if (thread == NULL) {
@@ -537,32 +513,29 @@ static int process_sample_event(struct perf_tool *tool __used,
if (cpu_list && !test_bit(sample->cpu, cpu_bitmap))
return 0;
- scripting_ops->process_event(event, scr->session->pevent,
- sample, evsel, machine, thread);
+ scripting_ops->process_event(event, sample, evsel, machine, &al);
evsel->hists.stats.total_period += sample->period;
return 0;
}
-static struct perf_script perf_script = {
- .tool = {
- .sample = process_sample_event,
- .mmap = perf_event__process_mmap,
- .comm = perf_event__process_comm,
- .exit = perf_event__process_task,
- .fork = perf_event__process_task,
- .attr = perf_event__process_attr,
- .event_type = perf_event__process_event_type,
- .tracing_data = perf_event__process_tracing_data,
- .build_id = perf_event__process_build_id,
- .ordered_samples = true,
- .ordering_requires_timestamps = true,
- },
+static struct perf_tool perf_script = {
+ .sample = process_sample_event,
+ .mmap = perf_event__process_mmap,
+ .comm = perf_event__process_comm,
+ .exit = perf_event__process_task,
+ .fork = perf_event__process_task,
+ .attr = perf_event__process_attr,
+ .event_type = perf_event__process_event_type,
+ .tracing_data = perf_event__process_tracing_data,
+ .build_id = perf_event__process_build_id,
+ .ordered_samples = true,
+ .ordering_requires_timestamps = true,
};
extern volatile int session_done;
-static void sig_handler(int sig __unused)
+static void sig_handler(int sig __maybe_unused)
{
session_done = 1;
}
@@ -573,7 +546,7 @@ static int __cmd_script(struct perf_session *session)
signal(SIGINT, sig_handler);
- ret = perf_session__process_events(session, &perf_script.tool);
+ ret = perf_session__process_events(session, &perf_script);
if (debug_mode)
pr_err("Misordered timestamps: %" PRIu64 "\n", nr_unordered);
@@ -672,8 +645,8 @@ static void list_available_languages(void)
fprintf(stderr, "\n");
}
-static int parse_scriptname(const struct option *opt __used,
- const char *str, int unset __used)
+static int parse_scriptname(const struct option *opt __maybe_unused,
+ const char *str, int unset __maybe_unused)
{
char spec[PATH_MAX];
const char *script, *ext;
@@ -718,8 +691,8 @@ static int parse_scriptname(const struct option *opt __used,
return 0;
}
-static int parse_output_fields(const struct option *opt __used,
- const char *arg, int unset __used)
+static int parse_output_fields(const struct option *opt __maybe_unused,
+ const char *arg, int unset __maybe_unused)
{
char *tok;
int i, imax = sizeof(all_output_options) / sizeof(struct output_option);
@@ -1010,8 +983,9 @@ static char *get_script_root(struct dirent *script_dirent, const char *suffix)
return script_root;
}
-static int list_available_scripts(const struct option *opt __used,
- const char *s __used, int unset __used)
+static int list_available_scripts(const struct option *opt __maybe_unused,
+ const char *s __maybe_unused,
+ int unset __maybe_unused)
{
struct dirent *script_next, *lang_next, script_dirent, lang_dirent;
char scripts_path[MAXPATHLEN];
@@ -1058,6 +1032,61 @@ static int list_available_scripts(const struct option *opt __used,
exit(0);
}
+/*
+ * Return -1 if none are found, otherwise the number of scripts found.
+ *
+ * Currently the only user of this function is the script browser, which
+ * lists all statically runnable scripts, lets the user select one, runs it
+ * and shows the output in a perf browser.
+ */
+int find_scripts(char **scripts_array, char **scripts_path_array)
+{
+ struct dirent *script_next, *lang_next, script_dirent, lang_dirent;
+ char scripts_path[MAXPATHLEN];
+ DIR *scripts_dir, *lang_dir;
+ char lang_path[MAXPATHLEN];
+ char *temp;
+ int i = 0;
+
+ snprintf(scripts_path, MAXPATHLEN, "%s/scripts", perf_exec_path());
+
+ scripts_dir = opendir(scripts_path);
+ if (!scripts_dir)
+ return -1;
+
+ for_each_lang(scripts_path, scripts_dir, lang_dirent, lang_next) {
+ snprintf(lang_path, MAXPATHLEN, "%s/%s", scripts_path,
+ lang_dirent.d_name);
+#ifdef NO_LIBPERL
+ if (strstr(lang_path, "perl"))
+ continue;
+#endif
+#ifdef NO_LIBPYTHON
+ if (strstr(lang_path, "python"))
+ continue;
+#endif
+
+ lang_dir = opendir(lang_path);
+ if (!lang_dir)
+ continue;
+
+ for_each_script(lang_path, lang_dir, script_dirent, script_next) {
+ /* Skip the real-time scripts: xxxtop.p[yl] */
+ if (strstr(script_dirent.d_name, "top."))
+ continue;
+ sprintf(scripts_path_array[i], "%s/%s", lang_path,
+ script_dirent.d_name);
+ temp = strchr(script_dirent.d_name, '.');
+ snprintf(scripts_array[i],
+ (temp - script_dirent.d_name) + 1,
+ "%s", script_dirent.d_name);
+ i++;
+ }
+ }
+
+ return i;
+}
+
static char *get_script_path(const char *script_root, const char *suffix)
{
struct dirent *script_next, *lang_next, script_dirent, lang_dirent;
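find_scripts() fills caller-allocated buffers rather than allocating its own: both arrays and every entry in them must be pre-sized by the caller (at least MAXPATHLEN bytes each, since the paths are sprintf'ed into them). A minimal caller sketch, assuming a hypothetical NUM_SCRIPTS upper bound:

	char *names[NUM_SCRIPTS], *paths[NUM_SCRIPTS];
	int i, nr;

	for (i = 0; i < NUM_SCRIPTS; i++) {
		names[i] = malloc(MAXPATHLEN);	/* short name, extension stripped */
		paths[i] = malloc(MAXPATHLEN);	/* full path to the script */
	}

	nr = find_scripts(names, paths);
	for (i = 0; i < nr; i++)
		printf("%s -> %s\n", names[i], paths[i]);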
@@ -1170,6 +1199,8 @@ static const struct option options[] = {
parse_output_fields),
OPT_BOOLEAN('a', "all-cpus", &system_wide,
"system-wide collection from all CPUs"),
+ OPT_STRING('S', "symbols", &symbol_conf.sym_list_str, "symbol[,symbol...]",
+ "only consider these symbols"),
OPT_STRING('C', "cpu", &cpu_list, "cpu", "list of cpus to profile"),
OPT_STRING('c', "comms", &symbol_conf.comm_list_str, "comm[,comm...]",
"only display events for these comms"),
@@ -1181,21 +1212,26 @@ static const struct option options[] = {
OPT_END()
};
-static bool have_cmd(int argc, const char **argv)
+static int have_cmd(int argc, const char **argv)
{
char **__argv = malloc(sizeof(const char *) * argc);
- if (!__argv)
- die("malloc");
+ if (!__argv) {
+ pr_err("malloc failed\n");
+ return -1;
+ }
+
memcpy(__argv, argv, sizeof(const char *) * argc);
argc = parse_options(argc, (const char **)__argv, record_options,
NULL, PARSE_OPT_STOP_AT_NON_OPTION);
free(__argv);
- return argc != 0;
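+ /* No leftover non-option args means there is no workload: go system-wide. */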
+ system_wide = (argc == 0);
+
+ return 0;
}
-int cmd_script(int argc, const char **argv, const char *prefix __used)
+int cmd_script(int argc, const char **argv, const char *prefix __maybe_unused)
{
char *rec_script_path = NULL;
char *rep_script_path = NULL;
@@ -1259,13 +1295,13 @@ int cmd_script(int argc, const char **argv, const char *prefix __used)
if (pipe(live_pipe) < 0) {
perror("failed to create pipe");
- exit(-1);
+ return -1;
}
pid = fork();
if (pid < 0) {
perror("failed to fork");
- exit(-1);
+ return -1;
}
if (!pid) {
@@ -1277,13 +1313,18 @@ int cmd_script(int argc, const char **argv, const char *prefix __used)
if (is_top_script(argv[0])) {
system_wide = true;
} else if (!system_wide) {
- system_wide = !have_cmd(argc - rep_args,
- &argv[rep_args]);
+ if (have_cmd(argc - rep_args, &argv[rep_args]) != 0) {
+ err = -1;
+ goto out;
+ }
}
__argv = malloc((argc + 6) * sizeof(const char *));
- if (!__argv)
- die("malloc");
+ if (!__argv) {
+ pr_err("malloc failed\n");
+ err = -ENOMEM;
+ goto out;
+ }
__argv[j++] = "/bin/sh";
__argv[j++] = rec_script_path;
@@ -1305,8 +1346,12 @@ int cmd_script(int argc, const char **argv, const char *prefix __used)
close(live_pipe[1]);
__argv = malloc((argc + 4) * sizeof(const char *));
- if (!__argv)
- die("malloc");
+ if (!__argv) {
+ pr_err("malloc failed\n");
+ err = -ENOMEM;
+ goto out;
+ }
+
j = 0;
__argv[j++] = "/bin/sh";
__argv[j++] = rep_script_path;
@@ -1331,12 +1376,20 @@ int cmd_script(int argc, const char **argv, const char *prefix __used)
if (!rec_script_path)
system_wide = false;
- else if (!system_wide)
- system_wide = !have_cmd(argc - 1, &argv[1]);
+ else if (!system_wide) {
+ if (have_cmd(argc - 1, &argv[1]) != 0) {
+ err = -1;
+ goto out;
+ }
+ }
__argv = malloc((argc + 2) * sizeof(const char *));
- if (!__argv)
- die("malloc");
+ if (!__argv) {
+ pr_err("malloc failed\n");
+ err = -ENOMEM;
+ goto out;
+ }
+
__argv[j++] = "/bin/sh";
__argv[j++] = script_path;
if (system_wide)
@@ -1356,12 +1409,10 @@ int cmd_script(int argc, const char **argv, const char *prefix __used)
setup_pager();
session = perf_session__new(input_name, O_RDONLY, 0, false,
- &perf_script.tool);
+ &perf_script);
if (session == NULL)
return -ENOMEM;
- perf_script.session = session;
-
if (cpu_list) {
if (perf_session__cpu_bitmap(session, cpu_list, cpu_bitmap))
return -1;
@@ -1387,18 +1438,18 @@ int cmd_script(int argc, const char **argv, const char *prefix __used)
input = open(session->filename, O_RDONLY); /* input_name */
if (input < 0) {
perror("failed to open file");
- exit(-1);
+ return -1;
}
err = fstat(input, &perf_stat);
if (err < 0) {
perror("failed to stat file");
- exit(-1);
+ return -1;
}
if (!perf_stat.st_size) {
fprintf(stderr, "zero-sized file, nothing to do!\n");
- exit(0);
+ return 0;
}
scripting_ops = script_spec__lookup(generate_script_lang);
diff --git a/tools/perf/builtin-stat.c b/tools/perf/builtin-stat.c
index 861f0aec77a..e8cd4d81b06 100644
--- a/tools/perf/builtin-stat.c
+++ b/tools/perf/builtin-stat.c
@@ -51,13 +51,13 @@
#include "util/evsel.h"
#include "util/debug.h"
#include "util/color.h"
+#include "util/stat.h"
#include "util/header.h"
#include "util/cpumap.h"
#include "util/thread.h"
#include "util/thread_map.h"
#include <sys/prctl.h>
-#include <math.h>
#include <locale.h>
#define DEFAULT_SEPARATOR " "
@@ -199,11 +199,6 @@ static int output_fd;
static volatile int done = 0;
-struct stats
-{
- double n, mean, M2;
-};
-
struct perf_stat {
struct stats res_stats[3];
};
@@ -220,48 +215,14 @@ static void perf_evsel__free_stat_priv(struct perf_evsel *evsel)
evsel->priv = NULL;
}
-static void update_stats(struct stats *stats, u64 val)
-{
- double delta;
-
- stats->n++;
- delta = val - stats->mean;
- stats->mean += delta / stats->n;
- stats->M2 += delta*(val - stats->mean);
-}
-
-static double avg_stats(struct stats *stats)
+static inline struct cpu_map *perf_evsel__cpus(struct perf_evsel *evsel)
{
- return stats->mean;
+ return (evsel->cpus && !target.cpu_list) ? evsel->cpus : evsel_list->cpus;
}
-/*
- * http://en.wikipedia.org/wiki/Algorithms_for_calculating_variance
- *
- * (\Sum n_i^2) - ((\Sum n_i)^2)/n
- * s^2 = -------------------------------
- * n - 1
- *
- * http://en.wikipedia.org/wiki/Stddev
- *
- * The std dev of the mean is related to the std dev by:
- *
- * s
- * s_mean = -------
- * sqrt(n)
- *
- */
-static double stddev_stats(struct stats *stats)
+static inline int perf_evsel__nr_cpus(struct perf_evsel *evsel)
{
- double variance, variance_mean;
-
- if (!stats->n)
- return 0.0;
-
- variance = stats->M2 / (stats->n - 1);
- variance_mean = variance / stats->n;
-
- return sqrt(variance_mean);
+ return perf_evsel__cpus(evsel)->nr;
}
static struct stats runtime_nsecs_stats[MAX_NR_CPUS];
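The removed update_stats()/avg_stats()/stddev_stats() helpers (Welford's online mean/variance algorithm) are not dropped but presumably moved behind the new util/stat.h include added above; print_noise_pct() later in this patch also starts using a rel_stddev_stats() helper from the same header. A sketch of the assumed helper, matching the logic it replaces:

	/* Assumed interface of util/stat.h (not shown in this diff):
	 * relative stddev as a percentage, guarding against avg == 0. */
	static inline double rel_stddev_stats(double stddev, double avg)
	{
		return avg ? 100.0 * stddev / avg : 0.0;
	}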
@@ -281,13 +242,9 @@ static int create_perf_stat_counter(struct perf_evsel *evsel,
struct perf_evsel *first)
{
struct perf_event_attr *attr = &evsel->attr;
- struct xyarray *group_fd = NULL;
bool exclude_guest_missing = false;
int ret;
- if (group && evsel != first)
- group_fd = first->fd;
-
if (scale)
attr->read_format = PERF_FORMAT_TOTAL_TIME_ENABLED |
PERF_FORMAT_TOTAL_TIME_RUNNING;
@@ -299,8 +256,7 @@ retry:
evsel->attr.exclude_guest = evsel->attr.exclude_host = 0;
if (perf_target__has_cpu(&target)) {
- ret = perf_evsel__open_per_cpu(evsel, evsel_list->cpus,
- group, group_fd);
+ ret = perf_evsel__open_per_cpu(evsel, perf_evsel__cpus(evsel));
if (ret)
goto check_ret;
return 0;
@@ -311,8 +267,7 @@ retry:
attr->enable_on_exec = 1;
}
- ret = perf_evsel__open_per_thread(evsel, evsel_list->threads,
- group, group_fd);
+ ret = perf_evsel__open_per_thread(evsel, evsel_list->threads);
if (!ret)
return 0;
/* fall through */
@@ -382,7 +337,7 @@ static int read_counter_aggr(struct perf_evsel *counter)
u64 *count = counter->counts->aggr.values;
int i;
- if (__perf_evsel__read(counter, evsel_list->cpus->nr,
+ if (__perf_evsel__read(counter, perf_evsel__nr_cpus(counter),
evsel_list->threads->nr, scale) < 0)
return -1;
@@ -411,7 +366,7 @@ static int read_counter(struct perf_evsel *counter)
u64 *count;
int cpu;
- for (cpu = 0; cpu < evsel_list->cpus->nr; cpu++) {
+ for (cpu = 0; cpu < perf_evsel__nr_cpus(counter); cpu++) {
if (__perf_evsel__read_on_cpu(counter, cpu, 0, scale) < 0)
return -1;
@@ -423,7 +378,7 @@ static int read_counter(struct perf_evsel *counter)
return 0;
}
-static int run_perf_stat(int argc __used, const char **argv)
+static int run_perf_stat(int argc __maybe_unused, const char **argv)
{
unsigned long long t0, t1;
struct perf_evsel *counter, *first;
@@ -434,7 +389,7 @@ static int run_perf_stat(int argc __used, const char **argv)
if (forks && (pipe(child_ready_pipe) < 0 || pipe(go_pipe) < 0)) {
perror("failed to create pipes");
- exit(1);
+ return -1;
}
if (forks) {
@@ -483,7 +438,10 @@ static int run_perf_stat(int argc __used, const char **argv)
close(child_ready_pipe[0]);
}
- first = list_entry(evsel_list->entries.next, struct perf_evsel, node);
+ if (group)
+ perf_evlist__set_leader(evsel_list);
+
+ first = perf_evlist__first(evsel_list);
list_for_each_entry(counter, &evsel_list->entries, node) {
if (create_perf_stat_counter(counter, first) < 0) {
@@ -513,13 +471,14 @@ static int run_perf_stat(int argc __used, const char **argv)
}
if (child_pid != -1)
kill(child_pid, SIGTERM);
- die("Not all events could be opened.\n");
+
+ pr_err("Not all events could be opened.\n");
return -1;
}
counter->supported = true;
}
- if (perf_evlist__set_filters(evsel_list)) {
+ if (perf_evlist__apply_filters(evsel_list)) {
error("failed to set filter with %d (%s)\n", errno,
strerror(errno));
return -1;
@@ -546,12 +505,12 @@ static int run_perf_stat(int argc __used, const char **argv)
if (no_aggr) {
list_for_each_entry(counter, &evsel_list->entries, node) {
read_counter(counter);
- perf_evsel__close_fd(counter, evsel_list->cpus->nr, 1);
+ perf_evsel__close_fd(counter, perf_evsel__nr_cpus(counter), 1);
}
} else {
list_for_each_entry(counter, &evsel_list->entries, node) {
read_counter_aggr(counter);
- perf_evsel__close_fd(counter, evsel_list->cpus->nr,
+ perf_evsel__close_fd(counter, perf_evsel__nr_cpus(counter),
evsel_list->threads->nr);
}
}
@@ -561,10 +520,7 @@ static int run_perf_stat(int argc __used, const char **argv)
static void print_noise_pct(double total, double avg)
{
- double pct = 0.0;
-
- if (avg)
- pct = 100.0*total/avg;
+ double pct = rel_stddev_stats(total, avg);
if (csv_output)
fprintf(output, "%s%.2f%%", csv_sep, pct);
@@ -592,7 +548,7 @@ static void nsec_printout(int cpu, struct perf_evsel *evsel, double avg)
if (no_aggr)
sprintf(cpustr, "CPU%*d%s",
csv_output ? 0 : -4,
- evsel_list->cpus->map[cpu], csv_sep);
+ perf_evsel__cpus(evsel)->map[cpu], csv_sep);
fprintf(output, fmt, cpustr, msecs, csv_sep, perf_evsel__name(evsel));
@@ -636,7 +592,9 @@ static const char *get_ratio_color(enum grc_type type, double ratio)
return color;
}
-static void print_stalled_cycles_frontend(int cpu, struct perf_evsel *evsel __used, double avg)
+static void print_stalled_cycles_frontend(int cpu,
+ struct perf_evsel *evsel
+ __maybe_unused, double avg)
{
double total, ratio = 0.0;
const char *color;
@@ -653,7 +611,9 @@ static void print_stalled_cycles_frontend(int cpu, struct perf_evsel *evsel __us
fprintf(output, " frontend cycles idle ");
}
-static void print_stalled_cycles_backend(int cpu, struct perf_evsel *evsel __used, double avg)
+static void print_stalled_cycles_backend(int cpu,
+ struct perf_evsel *evsel
+ __maybe_unused, double avg)
{
double total, ratio = 0.0;
const char *color;
@@ -670,7 +630,9 @@ static void print_stalled_cycles_backend(int cpu, struct perf_evsel *evsel __use
fprintf(output, " backend cycles idle ");
}
-static void print_branch_misses(int cpu, struct perf_evsel *evsel __used, double avg)
+static void print_branch_misses(int cpu,
+ struct perf_evsel *evsel __maybe_unused,
+ double avg)
{
double total, ratio = 0.0;
const char *color;
@@ -687,7 +649,9 @@ static void print_branch_misses(int cpu, struct perf_evsel *evsel __used, double
fprintf(output, " of all branches ");
}
-static void print_l1_dcache_misses(int cpu, struct perf_evsel *evsel __used, double avg)
+static void print_l1_dcache_misses(int cpu,
+ struct perf_evsel *evsel __maybe_unused,
+ double avg)
{
double total, ratio = 0.0;
const char *color;
@@ -704,7 +668,9 @@ static void print_l1_dcache_misses(int cpu, struct perf_evsel *evsel __used, dou
fprintf(output, " of all L1-dcache hits ");
}
-static void print_l1_icache_misses(int cpu, struct perf_evsel *evsel __used, double avg)
+static void print_l1_icache_misses(int cpu,
+ struct perf_evsel *evsel __maybe_unused,
+ double avg)
{
double total, ratio = 0.0;
const char *color;
@@ -721,7 +687,9 @@ static void print_l1_icache_misses(int cpu, struct perf_evsel *evsel __used, dou
fprintf(output, " of all L1-icache hits ");
}
-static void print_dtlb_cache_misses(int cpu, struct perf_evsel *evsel __used, double avg)
+static void print_dtlb_cache_misses(int cpu,
+ struct perf_evsel *evsel __maybe_unused,
+ double avg)
{
double total, ratio = 0.0;
const char *color;
@@ -738,7 +706,9 @@ static void print_dtlb_cache_misses(int cpu, struct perf_evsel *evsel __used, do
fprintf(output, " of all dTLB cache hits ");
}
-static void print_itlb_cache_misses(int cpu, struct perf_evsel *evsel __used, double avg)
+static void print_itlb_cache_misses(int cpu,
+ struct perf_evsel *evsel __maybe_unused,
+ double avg)
{
double total, ratio = 0.0;
const char *color;
@@ -755,7 +725,9 @@ static void print_itlb_cache_misses(int cpu, struct perf_evsel *evsel __used, do
fprintf(output, " of all iTLB cache hits ");
}
-static void print_ll_cache_misses(int cpu, struct perf_evsel *evsel __used, double avg)
+static void print_ll_cache_misses(int cpu,
+ struct perf_evsel *evsel __maybe_unused,
+ double avg)
{
double total, ratio = 0.0;
const char *color;
@@ -788,7 +760,7 @@ static void abs_printout(int cpu, struct perf_evsel *evsel, double avg)
if (no_aggr)
sprintf(cpustr, "CPU%*d%s",
csv_output ? 0 : -4,
- evsel_list->cpus->map[cpu], csv_sep);
+ perf_evsel__cpus(evsel)->map[cpu], csv_sep);
else
cpu = 0;
@@ -949,14 +921,14 @@ static void print_counter(struct perf_evsel *counter)
u64 ena, run, val;
int cpu;
- for (cpu = 0; cpu < evsel_list->cpus->nr; cpu++) {
+ for (cpu = 0; cpu < perf_evsel__nr_cpus(counter); cpu++) {
val = counter->counts->cpu[cpu].val;
ena = counter->counts->cpu[cpu].ena;
run = counter->counts->cpu[cpu].run;
if (run == 0 || ena == 0) {
fprintf(output, "CPU%*d%s%*s%s%*s",
csv_output ? 0 : -4,
- evsel_list->cpus->map[cpu], csv_sep,
+ perf_evsel__cpus(counter)->map[cpu], csv_sep,
csv_output ? 0 : 18,
counter->supported ? CNTR_NOT_COUNTED : CNTR_NOT_SUPPORTED,
csv_sep,
@@ -1061,8 +1033,8 @@ static const char * const stat_usage[] = {
NULL
};
-static int stat__set_big_num(const struct option *opt __used,
- const char *s __used, int unset)
+static int stat__set_big_num(const struct option *opt __maybe_unused,
+ const char *s __maybe_unused, int unset)
{
big_num_opt = unset ? 0 : 1;
return 0;
@@ -1156,7 +1128,7 @@ static int add_default_attributes(void)
return perf_evlist__add_default_attrs(evsel_list, very_very_detailed_attrs);
}
-int cmd_stat(int argc, const char **argv, const char *prefix __used)
+int cmd_stat(int argc, const char **argv, const char *prefix __maybe_unused)
{
struct perf_evsel *pos;
int status = -ENOMEM;
@@ -1192,7 +1164,7 @@ int cmd_stat(int argc, const char **argv, const char *prefix __used)
output = fopen(output_name, mode);
if (!output) {
perror("failed to create output file");
- exit(-1);
+ return -1;
}
clock_gettime(CLOCK_REALTIME, &tm);
fprintf(output, "# started on %s\n", ctime(&tm.tv_sec));
@@ -1255,7 +1227,7 @@ int cmd_stat(int argc, const char **argv, const char *prefix __used)
list_for_each_entry(pos, &evsel_list->entries, node) {
if (perf_evsel__alloc_stat_priv(pos) < 0 ||
- perf_evsel__alloc_counts(pos, evsel_list->cpus->nr) < 0)
+ perf_evsel__alloc_counts(pos, perf_evsel__nr_cpus(pos)) < 0)
goto out_free_fd;
}
diff --git a/tools/perf/builtin-test.c b/tools/perf/builtin-test.c
index 1d592f5cbea..484f26cc0c0 100644
--- a/tools/perf/builtin-test.c
+++ b/tools/perf/builtin-test.c
@@ -14,11 +14,13 @@
#include "util/symbol.h"
#include "util/thread_map.h"
#include "util/pmu.h"
+#include "event-parse.h"
#include "../../include/linux/hw_breakpoint.h"
#include <sys/mman.h>
-static int vmlinux_matches_kallsyms_filter(struct map *map __used, struct symbol *sym)
+static int vmlinux_matches_kallsyms_filter(struct map *map __maybe_unused,
+ struct symbol *sym)
{
bool *visited = symbol__priv(sym);
*visited = true;
@@ -294,7 +296,7 @@ static int test__open_syscall_event(void)
goto out_thread_map_delete;
}
- if (perf_evsel__open_per_thread(evsel, threads, false, NULL) < 0) {
+ if (perf_evsel__open_per_thread(evsel, threads) < 0) {
pr_debug("failed to open counter: %s, "
"tweak /proc/sys/kernel/perf_event_paranoid?\n",
strerror(errno));
@@ -369,7 +371,7 @@ static int test__open_syscall_event_on_all_cpus(void)
goto out_thread_map_delete;
}
- if (perf_evsel__open(evsel, cpus, threads, false, NULL) < 0) {
+ if (perf_evsel__open(evsel, cpus, threads) < 0) {
pr_debug("failed to open counter: %s, "
"tweak /proc/sys/kernel/perf_event_paranoid?\n",
strerror(errno));
@@ -533,7 +535,7 @@ static int test__basic_mmap(void)
perf_evlist__add(evlist, evsels[i]);
- if (perf_evsel__open(evsels[i], cpus, threads, false, NULL) < 0) {
+ if (perf_evsel__open(evsels[i], cpus, threads) < 0) {
pr_debug("failed to open counter: %s, "
"tweak /proc/sys/kernel/perf_event_paranoid?\n",
strerror(errno));
@@ -562,7 +564,7 @@ static int test__basic_mmap(void)
goto out_munmap;
}
- err = perf_evlist__parse_sample(evlist, event, &sample, false);
+ err = perf_evlist__parse_sample(evlist, event, &sample);
if (err) {
pr_err("Can't parse sample, err = %d\n", err);
goto out_munmap;
@@ -710,7 +712,7 @@ static int test__PERF_RECORD(void)
/*
* Config the evsels, setting attr->comm on the first one, etc.
*/
- evsel = list_entry(evlist->entries.next, struct perf_evsel, node);
+ evsel = perf_evlist__first(evlist);
evsel->attr.sample_type |= PERF_SAMPLE_CPU;
evsel->attr.sample_type |= PERF_SAMPLE_TID;
evsel->attr.sample_type |= PERF_SAMPLE_TIME;
@@ -737,7 +739,7 @@ static int test__PERF_RECORD(void)
* Call sys_perf_event_open on all the fds on all the evsels,
* grouping them if asked to.
*/
- err = perf_evlist__open(evlist, opts.group);
+ err = perf_evlist__open(evlist);
if (err < 0) {
pr_debug("perf_evlist__open: %s\n", strerror(errno));
goto out_delete_evlist;
@@ -779,7 +781,7 @@ static int test__PERF_RECORD(void)
if (type < PERF_RECORD_MAX)
nr_events[type]++;
- err = perf_evlist__parse_sample(evlist, event, &sample, false);
+ err = perf_evlist__parse_sample(evlist, event, &sample);
if (err < 0) {
if (verbose)
perf_event__fprintf(event, stderr);
@@ -996,7 +998,9 @@ static u64 mmap_read_self(void *addr)
/*
* If the RDPMC instruction faults then signal this back to the test parent task:
*/
-static void segfault_handler(int sig __used, siginfo_t *info __used, void *uc __used)
+static void segfault_handler(int sig __maybe_unused,
+ siginfo_t *info __maybe_unused,
+ void *uc __maybe_unused)
{
exit(-1);
}
@@ -1023,14 +1027,16 @@ static int __test__rdpmc(void)
fd = sys_perf_event_open(&attr, 0, -1, -1, 0);
if (fd < 0) {
- die("Error: sys_perf_event_open() syscall returned "
- "with %d (%s)\n", fd, strerror(errno));
+ pr_err("Error: sys_perf_event_open() syscall returned "
+ "with %d (%s)\n", fd, strerror(errno));
+ return -1;
}
addr = mmap(NULL, page_size, PROT_READ, MAP_SHARED, fd, 0);
if (addr == (void *)(-1)) {
- die("Error: mmap() syscall returned "
- "with (%s)\n", strerror(errno));
+ pr_err("Error: mmap() syscall returned with (%s)\n",
+ strerror(errno));
+ goto out_close;
}
for (n = 0; n < 6; n++) {
@@ -1051,9 +1057,9 @@ static int __test__rdpmc(void)
}
munmap(addr, page_size);
- close(fd);
-
pr_debug(" ");
+out_close:
+ close(fd);
if (!delta_sum)
return -1;
@@ -1092,6 +1098,309 @@ static int test__perf_pmu(void)
return perf_pmu__test();
}
+static int perf_evsel__roundtrip_cache_name_test(void)
+{
+ char name[128];
+ int type, op, err = 0, ret = 0, i, idx;
+ struct perf_evsel *evsel;
+ struct perf_evlist *evlist = perf_evlist__new(NULL, NULL);
+
+ if (evlist == NULL)
+ return -ENOMEM;
+
+ for (type = 0; type < PERF_COUNT_HW_CACHE_MAX; type++) {
+ for (op = 0; op < PERF_COUNT_HW_CACHE_OP_MAX; op++) {
+ /* skip invalid cache type */
+ if (!perf_evsel__is_cache_op_valid(type, op))
+ continue;
+
+ for (i = 0; i < PERF_COUNT_HW_CACHE_RESULT_MAX; i++) {
+ __perf_evsel__hw_cache_type_op_res_name(type, op, i,
+ name, sizeof(name));
+ err = parse_events(evlist, name, 0);
+ if (err)
+ ret = err;
+ }
+ }
+ }
+
+ idx = 0;
+ evsel = perf_evlist__first(evlist);
+
+ for (type = 0; type < PERF_COUNT_HW_CACHE_MAX; type++) {
+ for (op = 0; op < PERF_COUNT_HW_CACHE_OP_MAX; op++) {
+ /* skip invalid cache type */
+ if (!perf_evsel__is_cache_op_valid(type, op))
+ continue;
+
+ for (i = 0; i < PERF_COUNT_HW_CACHE_RESULT_MAX; i++) {
+ __perf_evsel__hw_cache_type_op_res_name(type, op, i,
+ name, sizeof(name));
+ if (evsel->idx != idx)
+ continue;
+
+ ++idx;
+
+ if (strcmp(perf_evsel__name(evsel), name)) {
+ pr_debug("%s != %s\n", perf_evsel__name(evsel), name);
+ ret = -1;
+ }
+
+ evsel = perf_evsel__next(evsel);
+ }
+ }
+ }
+
+ perf_evlist__delete(evlist);
+ return ret;
+}
+
+static int __perf_evsel__name_array_test(const char *names[], int nr_names)
+{
+ int i, err;
+ struct perf_evsel *evsel;
+ struct perf_evlist *evlist = perf_evlist__new(NULL, NULL);
+
+ if (evlist == NULL)
+ return -ENOMEM;
+
+ for (i = 0; i < nr_names; ++i) {
+ err = parse_events(evlist, names[i], 0);
+ if (err) {
+ pr_debug("failed to parse event '%s', err %d\n",
+ names[i], err);
+ goto out_delete_evlist;
+ }
+ }
+
+ err = 0;
+ list_for_each_entry(evsel, &evlist->entries, node) {
+ if (strcmp(perf_evsel__name(evsel), names[evsel->idx])) {
+ --err;
+ pr_debug("%s != %s\n", perf_evsel__name(evsel), names[evsel->idx]);
+ }
+ }
+
+out_delete_evlist:
+ perf_evlist__delete(evlist);
+ return err;
+}
+
+#define perf_evsel__name_array_test(names) \
+ __perf_evsel__name_array_test(names, ARRAY_SIZE(names))
+
+static int perf_evsel__roundtrip_name_test(void)
+{
+ int err = 0, ret = 0;
+
+ err = perf_evsel__name_array_test(perf_evsel__hw_names);
+ if (err)
+ ret = err;
+
+ err = perf_evsel__name_array_test(perf_evsel__sw_names);
+ if (err)
+ ret = err;
+
+ err = perf_evsel__roundtrip_cache_name_test();
+ if (err)
+ ret = err;
+
+ return ret;
+}
+
+static int perf_evsel__test_field(struct perf_evsel *evsel, const char *name,
+ int size, bool should_be_signed)
+{
+ struct format_field *field = perf_evsel__field(evsel, name);
+ int is_signed;
+ int ret = 0;
+
+ if (field == NULL) {
+ pr_debug("%s: \"%s\" field not found!\n", evsel->name, name);
+ return -1;
+ }
+
+ is_signed = !!(field->flags & FIELD_IS_SIGNED);
+ if (should_be_signed && !is_signed) {
+ pr_debug("%s: \"%s\" signedness(%d) is wrong, should be %d\n",
+ evsel->name, name, is_signed, should_be_signed);
+ ret = -1;
+ }
+
+ if (field->size != size) {
+ pr_debug("%s: \"%s\" size (%d) should be %d!\n",
+ evsel->name, name, field->size, size);
+ ret = -1;
+ }
+
+ return ret;
+}
+
+static int perf_evsel__tp_sched_test(void)
+{
+ struct perf_evsel *evsel = perf_evsel__newtp("sched", "sched_switch", 0);
+ int ret = 0;
+
+ if (evsel == NULL) {
+ pr_debug("perf_evsel__new\n");
+ return -1;
+ }
+
+ if (perf_evsel__test_field(evsel, "prev_comm", 16, true))
+ ret = -1;
+
+ if (perf_evsel__test_field(evsel, "prev_pid", 4, true))
+ ret = -1;
+
+ if (perf_evsel__test_field(evsel, "prev_prio", 4, true))
+ ret = -1;
+
+ if (perf_evsel__test_field(evsel, "prev_state", 8, true))
+ ret = -1;
+
+ if (perf_evsel__test_field(evsel, "next_comm", 16, true))
+ ret = -1;
+
+ if (perf_evsel__test_field(evsel, "next_pid", 4, true))
+ ret = -1;
+
+ if (perf_evsel__test_field(evsel, "next_prio", 4, true))
+ ret = -1;
+
+ perf_evsel__delete(evsel);
+
+ evsel = perf_evsel__newtp("sched", "sched_wakeup", 0);
+
+ if (perf_evsel__test_field(evsel, "comm", 16, true))
+ ret = -1;
+
+ if (perf_evsel__test_field(evsel, "pid", 4, true))
+ ret = -1;
+
+ if (perf_evsel__test_field(evsel, "prio", 4, true))
+ ret = -1;
+
+ if (perf_evsel__test_field(evsel, "success", 4, true))
+ ret = -1;
+
+ if (perf_evsel__test_field(evsel, "target_cpu", 4, true))
+ ret = -1;
+
+ perf_evsel__delete(evsel);
+
+ return ret;
+}
+
+static int test__syscall_open_tp_fields(void)
+{
+ struct perf_record_opts opts = {
+ .target = {
+ .uid = UINT_MAX,
+ .uses_mmap = true,
+ },
+ .no_delay = true,
+ .freq = 1,
+ .mmap_pages = 256,
+ .raw_samples = true,
+ };
+ const char *filename = "/etc/passwd";
+ int flags = O_RDONLY | O_DIRECTORY;
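+ /*
+ * O_DIRECTORY on a regular file makes the open fail with ENOTDIR,
+ * but the sys_enter_open tracepoint fires on entry anyway, carrying
+ * exactly these flags -- which is all this test needs to check.
+ */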
+ struct perf_evlist *evlist = perf_evlist__new(NULL, NULL);
+ struct perf_evsel *evsel;
+ int err = -1, i, nr_events = 0, nr_polls = 0;
+
+ if (evlist == NULL) {
+ pr_debug("%s: perf_evlist__new\n", __func__);
+ goto out;
+ }
+
+ evsel = perf_evsel__newtp("syscalls", "sys_enter_open", 0);
+ if (evsel == NULL) {
+ pr_debug("%s: perf_evsel__newtp\n", __func__);
+ goto out_delete_evlist;
+ }
+
+ perf_evlist__add(evlist, evsel);
+
+ err = perf_evlist__create_maps(evlist, &opts.target);
+ if (err < 0) {
+ pr_debug("%s: perf_evlist__create_maps\n", __func__);
+ goto out_delete_evlist;
+ }
+
+ perf_evsel__config(evsel, &opts, evsel);
+
+ evlist->threads->map[0] = getpid();
+
+ err = perf_evlist__open(evlist);
+ if (err < 0) {
+ pr_debug("perf_evlist__open: %s\n", strerror(errno));
+ goto out_delete_evlist;
+ }
+
+ err = perf_evlist__mmap(evlist, UINT_MAX, false);
+ if (err < 0) {
+ pr_debug("perf_evlist__mmap: %s\n", strerror(errno));
+ goto out_delete_evlist;
+ }
+
+ perf_evlist__enable(evlist);
+
+ /*
+ * Generate the event:
+ */
+ open(filename, flags);
+
+ while (1) {
+ int before = nr_events;
+
+ for (i = 0; i < evlist->nr_mmaps; i++) {
+ union perf_event *event;
+
+ while ((event = perf_evlist__mmap_read(evlist, i)) != NULL) {
+ const u32 type = event->header.type;
+ int tp_flags;
+ struct perf_sample sample;
+
+ ++nr_events;
+
+ if (type != PERF_RECORD_SAMPLE)
+ continue;
+
+ err = perf_evsel__parse_sample(evsel, event, &sample);
+ if (err) {
+ pr_err("Can't parse sample, err = %d\n", err);
+ goto out_munmap;
+ }
+
+ tp_flags = perf_evsel__intval(evsel, &sample, "flags");
+
+ if (flags != tp_flags) {
+ pr_debug("%s: Expected flags=%#x, got %#x\n",
+ __func__, flags, tp_flags);
+ goto out_munmap;
+ }
+
+ goto out_ok;
+ }
+ }
+
+ if (nr_events == before)
+ poll(evlist->pollfd, evlist->nr_fds, 10);
+
+ if (++nr_polls > 5) {
+ pr_debug("%s: no events!\n", __func__);
+ goto out_munmap;
+ }
+ }
+out_ok:
+ err = 0;
+out_munmap:
+ perf_evlist__munmap(evlist);
+out_delete_evlist:
+ perf_evlist__delete(evlist);
+out:
+ return err;
+}
+
static struct test {
const char *desc;
int (*func)(void);
@@ -1135,6 +1444,18 @@ static struct test {
.func = dso__test_data,
},
{
+ .desc = "roundtrip evsel->name check",
+ .func = perf_evsel__roundtrip_name_test,
+ },
+ {
+ .desc = "Check parsing of sched tracepoints fields",
+ .func = perf_evsel__tp_sched_test,
+ },
+ {
+ .desc = "Generate and check syscalls:sys_enter_open event fields",
+ .func = test__syscall_open_tp_fields,
+ },
+ {
.func = NULL,
},
};
@@ -1199,7 +1520,7 @@ static int perf_test__list(int argc, const char **argv)
return 0;
}
-int cmd_test(int argc, const char **argv, const char *prefix __used)
+int cmd_test(int argc, const char **argv, const char *prefix __maybe_unused)
{
const char * const test_usage[] = {
"perf test [<options>] [{list <test-name-fragment>|[<test-name-fragments>|<test-numbers>]}]",
diff --git a/tools/perf/builtin-timechart.c b/tools/perf/builtin-timechart.c
index 3b75b2e21ea..b1a8a3b841c 100644
--- a/tools/perf/builtin-timechart.c
+++ b/tools/perf/builtin-timechart.c
@@ -168,9 +168,8 @@ static struct per_pid *find_create_pid(int pid)
return cursor;
cursor = cursor->next;
}
- cursor = malloc(sizeof(struct per_pid));
+ cursor = zalloc(sizeof(*cursor));
assert(cursor != NULL);
- memset(cursor, 0, sizeof(struct per_pid));
cursor->pid = pid;
cursor->next = all_data;
all_data = cursor;
@@ -195,9 +194,8 @@ static void pid_set_comm(int pid, char *comm)
}
c = c->next;
}
- c = malloc(sizeof(struct per_pidcomm));
+ c = zalloc(sizeof(*c));
assert(c != NULL);
- memset(c, 0, sizeof(struct per_pidcomm));
c->comm = strdup(comm);
p->current = c;
c->next = p->all;
@@ -239,17 +237,15 @@ pid_put_sample(int pid, int type, unsigned int cpu, u64 start, u64 end)
p = find_create_pid(pid);
c = p->current;
if (!c) {
- c = malloc(sizeof(struct per_pidcomm));
+ c = zalloc(sizeof(*c));
assert(c != NULL);
- memset(c, 0, sizeof(struct per_pidcomm));
p->current = c;
c->next = p->all;
p->all = c;
}
- sample = malloc(sizeof(struct cpu_sample));
+ sample = zalloc(sizeof(*sample));
assert(sample != NULL);
- memset(sample, 0, sizeof(struct cpu_sample));
sample->start_time = start;
sample->end_time = end;
sample->type = type;
@@ -275,28 +271,28 @@ static int cpus_cstate_state[MAX_CPUS];
static u64 cpus_pstate_start_times[MAX_CPUS];
static u64 cpus_pstate_state[MAX_CPUS];
-static int process_comm_event(struct perf_tool *tool __used,
+static int process_comm_event(struct perf_tool *tool __maybe_unused,
union perf_event *event,
- struct perf_sample *sample __used,
- struct machine *machine __used)
+ struct perf_sample *sample __maybe_unused,
+ struct machine *machine __maybe_unused)
{
pid_set_comm(event->comm.tid, event->comm.comm);
return 0;
}
-static int process_fork_event(struct perf_tool *tool __used,
+static int process_fork_event(struct perf_tool *tool __maybe_unused,
union perf_event *event,
- struct perf_sample *sample __used,
- struct machine *machine __used)
+ struct perf_sample *sample __maybe_unused,
+ struct machine *machine __maybe_unused)
{
pid_fork(event->fork.pid, event->fork.ppid, event->fork.time);
return 0;
}
-static int process_exit_event(struct perf_tool *tool __used,
+static int process_exit_event(struct perf_tool *tool __maybe_unused,
union perf_event *event,
- struct perf_sample *sample __used,
- struct machine *machine __used)
+ struct perf_sample *sample __maybe_unused,
+ struct machine *machine __maybe_unused)
{
pid_exit(event->fork.pid, event->fork.time);
return 0;
@@ -373,11 +369,10 @@ static void c_state_start(int cpu, u64 timestamp, int state)
static void c_state_end(int cpu, u64 timestamp)
{
- struct power_event *pwr;
- pwr = malloc(sizeof(struct power_event));
+ struct power_event *pwr = zalloc(sizeof(*pwr));
+
if (!pwr)
return;
- memset(pwr, 0, sizeof(struct power_event));
pwr->state = cpus_cstate_state[cpu];
pwr->start_time = cpus_cstate_start_times[cpu];
@@ -392,14 +387,13 @@ static void c_state_end(int cpu, u64 timestamp)
static void p_state_change(int cpu, u64 timestamp, u64 new_freq)
{
struct power_event *pwr;
- pwr = malloc(sizeof(struct power_event));
if (new_freq > 8000000) /* detect invalid data */
return;
+ pwr = zalloc(sizeof(*pwr));
if (!pwr)
return;
- memset(pwr, 0, sizeof(struct power_event));
pwr->state = cpus_pstate_state[cpu];
pwr->start_time = cpus_pstate_start_times[cpu];
@@ -429,15 +423,13 @@ static void p_state_change(int cpu, u64 timestamp, u64 new_freq)
static void
sched_wakeup(int cpu, u64 timestamp, int pid, struct trace_entry *te)
{
- struct wake_event *we;
struct per_pid *p;
struct wakeup_entry *wake = (void *)te;
+ struct wake_event *we = zalloc(sizeof(*we));
- we = malloc(sizeof(struct wake_event));
if (!we)
return;
- memset(we, 0, sizeof(struct wake_event));
we->time = timestamp;
we->waker = pid;
@@ -491,11 +483,11 @@ static void sched_switch(int cpu, u64 timestamp, struct trace_entry *te)
}
-static int process_sample_event(struct perf_tool *tool __used,
- union perf_event *event __used,
+static int process_sample_event(struct perf_tool *tool __maybe_unused,
+ union perf_event *event __maybe_unused,
struct perf_sample *sample,
struct perf_evsel *evsel,
- struct machine *machine __used)
+ struct machine *machine __maybe_unused)
{
struct trace_entry *te;
@@ -579,13 +571,12 @@ static void end_sample_processing(void)
struct power_event *pwr;
for (cpu = 0; cpu <= numcpus; cpu++) {
- pwr = malloc(sizeof(struct power_event));
+ /* C state */
+#if 0
+ pwr = zalloc(sizeof(*pwr));
if (!pwr)
return;
- memset(pwr, 0, sizeof(struct power_event));
- /* C state */
-#if 0
pwr->state = cpus_cstate_state[cpu];
pwr->start_time = cpus_cstate_start_times[cpu];
pwr->end_time = last_time;
@@ -597,10 +588,9 @@ static void end_sample_processing(void)
#endif
/* P state */
- pwr = malloc(sizeof(struct power_event));
+ pwr = zalloc(sizeof(*pwr));
if (!pwr)
return;
- memset(pwr, 0, sizeof(struct power_event));
pwr->state = cpus_pstate_state[cpu];
pwr->start_time = cpus_pstate_start_times[cpu];
@@ -830,11 +820,9 @@ static void draw_process_bars(void)
static void add_process_filter(const char *string)
{
- struct process_filter *filt;
- int pid;
+ int pid = strtoull(string, NULL, 10);
+ struct process_filter *filt = malloc(sizeof(*filt));
- pid = strtoull(string, NULL, 10);
- filt = malloc(sizeof(struct process_filter));
if (!filt)
return;
@@ -1081,7 +1069,8 @@ static int __cmd_record(int argc, const char **argv)
}
static int
-parse_process(const struct option *opt __used, const char *arg, int __used unset)
+parse_process(const struct option *opt __maybe_unused, const char *arg,
+ int __maybe_unused unset)
{
if (arg)
add_process_filter(arg);
@@ -1106,7 +1095,8 @@ static const struct option options[] = {
};
-int cmd_timechart(int argc, const char **argv, const char *prefix __used)
+int cmd_timechart(int argc, const char **argv,
+ const char *prefix __maybe_unused)
{
argc = parse_options(argc, argv, options, timechart_usage,
PARSE_OPT_STOP_AT_NON_OPTION);
diff --git a/tools/perf/builtin-top.c b/tools/perf/builtin-top.c
index 68cd61ef6ac..e434a16bb5a 100644
--- a/tools/perf/builtin-top.c
+++ b/tools/perf/builtin-top.c
@@ -95,7 +95,8 @@ static void perf_top__update_print_entries(struct perf_top *top)
top->print_entries -= 9;
}
-static void perf_top__sig_winch(int sig __used, siginfo_t *info __used, void *arg)
+static void perf_top__sig_winch(int sig __maybe_unused,
+ siginfo_t *info __maybe_unused, void *arg)
{
struct perf_top *top = arg;
@@ -509,7 +510,7 @@ static void perf_top__handle_keypress(struct perf_top *top, int c)
prompt_integer(&counter, "Enter details event counter");
if (counter >= top->evlist->nr_entries) {
- top->sym_evsel = list_entry(top->evlist->entries.next, struct perf_evsel, node);
+ top->sym_evsel = perf_evlist__first(top->evlist);
fprintf(stderr, "Sorry, no such event, using %s.\n", perf_evsel__name(top->sym_evsel));
sleep(1);
break;
@@ -518,7 +519,7 @@ static void perf_top__handle_keypress(struct perf_top *top, int c)
if (top->sym_evsel->idx == counter)
break;
} else
- top->sym_evsel = list_entry(top->evlist->entries.next, struct perf_evsel, node);
+ top->sym_evsel = perf_evlist__first(top->evlist);
break;
case 'f':
prompt_integer(&top->count_filter, "Enter display event count filter");
@@ -663,7 +664,7 @@ static const char *skip_symbols[] = {
NULL
};
-static int symbol_filter(struct map *map __used, struct symbol *sym)
+static int symbol_filter(struct map *map __maybe_unused, struct symbol *sym)
{
const char *name = sym->name;
int i;
@@ -783,8 +784,10 @@ static void perf_event__process_sample(struct perf_tool *tool,
if ((sort__has_parent || symbol_conf.use_callchain) &&
sample->callchain) {
- err = machine__resolve_callchain(machine, al.thread,
- sample->callchain, &parent);
+ err = machine__resolve_callchain(machine, evsel,
+ al.thread, sample,
+ &parent);
+
if (err)
return;
}
@@ -820,7 +823,7 @@ static void perf_top__mmap_read_idx(struct perf_top *top, int idx)
int ret;
while ((event = perf_evlist__mmap_read(top->evlist, idx)) != NULL) {
- ret = perf_evlist__parse_sample(top->evlist, event, &sample, false);
+ ret = perf_evlist__parse_sample(top->evlist, event, &sample);
if (ret) {
pr_err("Can't parse sample, err = %d\n", ret);
continue;
@@ -884,17 +887,14 @@ static void perf_top__mmap_read(struct perf_top *top)
static void perf_top__start_counters(struct perf_top *top)
{
- struct perf_evsel *counter, *first;
+ struct perf_evsel *counter;
struct perf_evlist *evlist = top->evlist;
- first = list_entry(evlist->entries.next, struct perf_evsel, node);
+ if (top->group)
+ perf_evlist__set_leader(evlist);
list_for_each_entry(counter, &evlist->entries, node) {
struct perf_event_attr *attr = &counter->attr;
- struct xyarray *group_fd = NULL;
-
- if (top->group && counter != first)
- group_fd = first->fd;
attr->sample_type = PERF_SAMPLE_IP | PERF_SAMPLE_TID;
@@ -925,8 +925,7 @@ retry_sample_id:
attr->sample_id_all = top->sample_id_all_missing ? 0 : 1;
try_again:
if (perf_evsel__open(counter, top->evlist->cpus,
- top->evlist->threads, top->group,
- group_fd) < 0) {
+ top->evlist->threads) < 0) {
int err = errno;
if (err == EPERM || err == EACCES) {
@@ -1165,7 +1164,7 @@ static const char * const top_usage[] = {
NULL
};
-int cmd_top(int argc, const char **argv, const char *prefix __used)
+int cmd_top(int argc, const char **argv, const char *prefix __maybe_unused)
{
struct perf_evsel *pos;
int status;
@@ -1328,7 +1327,7 @@ int cmd_top(int argc, const char **argv, const char *prefix __used)
pos->attr.sample_period = top.default_interval;
}
- top.sym_evsel = list_entry(top.evlist->entries.next, struct perf_evsel, node);
+ top.sym_evsel = perf_evlist__first(top.evlist);
symbol_conf.priv_size = sizeof(struct annotation);
diff --git a/tools/perf/builtin-trace.c b/tools/perf/builtin-trace.c
new file mode 100644
index 00000000000..8f113dab8bf
--- /dev/null
+++ b/tools/perf/builtin-trace.c
@@ -0,0 +1,310 @@
+#include "builtin.h"
+#include "util/evlist.h"
+#include "util/parse-options.h"
+#include "util/thread_map.h"
+#include "event-parse.h"
+
+#include <libaudit.h>
+#include <stdlib.h>
+
+static struct syscall_fmt {
+ const char *name;
+ const char *alias;
+ bool errmsg;
+ bool timeout;
+} syscall_fmts[] = {
+ { .name = "arch_prctl", .errmsg = true, .alias = "prctl", },
+ { .name = "fstat", .errmsg = true, .alias = "newfstat", },
+ { .name = "fstatat", .errmsg = true, .alias = "newfstatat", },
+ { .name = "futex", .errmsg = true, },
+ { .name = "poll", .errmsg = true, .timeout = true, },
+ { .name = "ppoll", .errmsg = true, .timeout = true, },
+ { .name = "read", .errmsg = true, },
+ { .name = "recvfrom", .errmsg = true, },
+ { .name = "select", .errmsg = true, .timeout = true, },
+ { .name = "stat", .errmsg = true, .alias = "newstat", },
+};
+
+static int syscall_fmt__cmp(const void *name, const void *fmtp)
+{
+ const struct syscall_fmt *fmt = fmtp;
+ return strcmp(name, fmt->name);
+}
+
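+/* Note: bsearch() below requires syscall_fmts[] to stay sorted by ->name. */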
+static struct syscall_fmt *syscall_fmt__find(const char *name)
+{
+ const int nmemb = ARRAY_SIZE(syscall_fmts);
+ return bsearch(name, syscall_fmts, nmemb, sizeof(struct syscall_fmt), syscall_fmt__cmp);
+}
+
+struct syscall {
+ struct event_format *tp_format;
+ const char *name;
+ struct syscall_fmt *fmt;
+};
+
+struct trace {
+ int audit_machine;
+ struct {
+ int max;
+ struct syscall *table;
+ } syscalls;
+ struct perf_record_opts opts;
+};
+
+static int trace__read_syscall_info(struct trace *trace, int id)
+{
+ char tp_name[128];
+ struct syscall *sc;
+
+ if (id > trace->syscalls.max) {
+ struct syscall *nsyscalls = realloc(trace->syscalls.table, (id + 1) * sizeof(*sc));
+
+ if (nsyscalls == NULL)
+ return -1;
+
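+ /* Zero only the newly grown tail, or the whole table on first use. */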
+ if (trace->syscalls.max != -1) {
+ memset(nsyscalls + trace->syscalls.max + 1, 0,
+ (id - trace->syscalls.max) * sizeof(*sc));
+ } else {
+ memset(nsyscalls, 0, (id + 1) * sizeof(*sc));
+ }
+
+ trace->syscalls.table = nsyscalls;
+ trace->syscalls.max = id;
+ }
+
+ sc = trace->syscalls.table + id;
+ sc->name = audit_syscall_to_name(id, trace->audit_machine);
+ if (sc->name == NULL)
+ return -1;
+
+ sc->fmt = syscall_fmt__find(sc->name);
+
+ snprintf(tp_name, sizeof(tp_name), "sys_enter_%s", sc->name);
+ sc->tp_format = event_format__new("syscalls", tp_name);
+
+ if (sc->tp_format == NULL && sc->fmt && sc->fmt->alias) {
+ snprintf(tp_name, sizeof(tp_name), "sys_enter_%s", sc->fmt->alias);
+ sc->tp_format = event_format__new("syscalls", tp_name);
+ }
+
+ return sc->tp_format != NULL ? 0 : -1;
+}
+
+static size_t syscall__fprintf_args(struct syscall *sc, unsigned long *args, FILE *fp)
+{
+ int i = 0;
+ size_t printed = 0;
+
+ if (sc->tp_format != NULL) {
+ struct format_field *field;
+
+ for (field = sc->tp_format->format.fields->next; field; field = field->next) {
+ printed += fprintf(fp, "%s%s: %ld", printed ? ", " : "",
+ field->name, args[i++]);
+ }
+ } else {
+ while (i < 6) {
+ printed += fprintf(fp, "%sarg%d: %ld", printed ? ", " : "", i, args[i]);
+ ++i;
+ }
+ }
+
+ return printed;
+}
+
+static int trace__run(struct trace *trace)
+{
+ struct perf_evlist *evlist = perf_evlist__new(NULL, NULL);
+ struct perf_evsel *evsel, *evsel_enter, *evsel_exit;
+ int err = -1, i, nr_events = 0, before;
+
+ if (evlist == NULL) {
+ printf("Not enough memory to run!\n");
+ goto out;
+ }
+
+ evsel_enter = perf_evsel__newtp("raw_syscalls", "sys_enter", 0);
+ if (evsel_enter == NULL) {
+ printf("Couldn't read the raw_syscalls:sys_enter tracepoint information!\n");
+ goto out_delete_evlist;
+ }
+
+ perf_evlist__add(evlist, evsel_enter);
+
+ evsel_exit = perf_evsel__newtp("raw_syscalls", "sys_exit", 1);
+ if (evsel_exit == NULL) {
+ printf("Couldn't read the raw_syscalls:sys_exit tracepoint information!\n");
+ goto out_delete_evlist;
+ }
+
+ perf_evlist__add(evlist, evsel_exit);
+
+ err = perf_evlist__create_maps(evlist, &trace->opts.target);
+ if (err < 0) {
+ printf("Problems parsing the target to trace, check your options!\n");
+ goto out_delete_evlist;
+ }
+
+ perf_evlist__config_attrs(evlist, &trace->opts);
+
+ err = perf_evlist__open(evlist);
+ if (err < 0) {
+ printf("Couldn't create the events: %s\n", strerror(errno));
+ goto out_delete_evlist;
+ }
+
+ err = perf_evlist__mmap(evlist, UINT_MAX, false);
+ if (err < 0) {
+ printf("Couldn't mmap the events: %s\n", strerror(errno));
+ goto out_delete_evlist;
+ }
+
+ perf_evlist__enable(evlist);
+again:
+ before = nr_events;
+
+ for (i = 0; i < evlist->nr_mmaps; i++) {
+ union perf_event *event;
+
+ while ((event = perf_evlist__mmap_read(evlist, i)) != NULL) {
+ const u32 type = event->header.type;
+ struct syscall *sc;
+ struct perf_sample sample;
+ int id;
+
+ ++nr_events;
+
+ switch (type) {
+ case PERF_RECORD_SAMPLE:
+ break;
+ case PERF_RECORD_LOST:
+ printf("LOST %" PRIu64 " events!\n", event->lost.lost);
+ continue;
+ default:
+ printf("Unexpected %s event, skipping...\n",
+ perf_event__name(type));
+ continue;
+ }
+
+ err = perf_evlist__parse_sample(evlist, event, &sample);
+ if (err) {
+ printf("Can't parse sample, err = %d, skipping...\n", err);
+ continue;
+ }
+
+ evsel = perf_evlist__id2evsel(evlist, sample.id);
+ if (evsel == NULL) {
+ printf("Unknown tp ID %" PRIu64 ", skipping...\n", sample.id);
+ continue;
+ }
+
+ id = perf_evsel__intval(evsel, &sample, "id");
+ if (id < 0) {
+ printf("Invalid syscall %d id, skipping...\n", id);
+ continue;
+ }
+
+ if ((id > trace->syscalls.max || trace->syscalls.table[id].name == NULL) &&
+ trace__read_syscall_info(trace, id))
+ continue;
+
+ if ((id > trace->syscalls.max || trace->syscalls.table[id].name == NULL))
+ continue;
+
+ sc = &trace->syscalls.table[id];
+
+ if (evlist->threads->map[0] == -1 || evlist->threads->nr > 1)
+ printf("%d ", sample.tid);
+
+ if (evsel == evsel_enter) {
+ void *args = perf_evsel__rawptr(evsel, &sample, "args");
+
+ printf("%s(", sc->name);
+ syscall__fprintf_args(sc, args, stdout);
+ } else if (evsel == evsel_exit) {
+ int ret = perf_evsel__intval(evsel, &sample, "ret");
+
+ if (ret < 0 && sc->fmt && sc->fmt->errmsg) {
+ char bf[256];
+ const char *emsg = strerror_r(-ret, bf, sizeof(bf)),
+ *e = audit_errno_to_name(-ret);
+
+ printf(") = -1 %s %s", e, emsg);
+ } else if (ret == 0 && sc->fmt && sc->fmt->timeout)
+ printf(") = 0 Timeout");
+ else
+ printf(") = %d", ret);
+
+ putchar('\n');
+ }
+ }
+ }
+
+ if (nr_events == before)
+ poll(evlist->pollfd, evlist->nr_fds, -1);
+
+ goto again;
+
+out_delete_evlist:
+ perf_evlist__delete(evlist);
+out:
+ return err;
+}
+
+int cmd_trace(int argc, const char **argv, const char *prefix __maybe_unused)
+{
+ const char * const trace_usage[] = {
+ "perf trace [<options>]",
+ NULL
+ };
+ struct trace trace = {
+ .audit_machine = audit_detect_machine(),
+ .syscalls = {
+ .max = -1,
+ },
+ .opts = {
+ .target = {
+ .uid = UINT_MAX,
+ .uses_mmap = true,
+ },
+ .user_freq = UINT_MAX,
+ .user_interval = ULLONG_MAX,
+ .no_delay = true,
+ .mmap_pages = 1024,
+ },
+ };
+ const struct option trace_options[] = {
+ OPT_STRING('p', "pid", &trace.opts.target.pid, "pid",
+ "trace events on existing process id"),
+ OPT_STRING(0, "tid", &trace.opts.target.tid, "tid",
+ "trace events on existing thread id"),
+ OPT_BOOLEAN(0, "all-cpus", &trace.opts.target.system_wide,
+ "system-wide collection from all CPUs"),
+ OPT_STRING(0, "cpu", &trace.opts.target.cpu_list, "cpu",
+ "list of cpus to monitor"),
+ OPT_BOOLEAN(0, "no-inherit", &trace.opts.no_inherit,
+ "child tasks do not inherit counters"),
+ OPT_UINTEGER(0, "mmap-pages", &trace.opts.mmap_pages,
+ "number of mmap data pages"),
+ OPT_STRING(0, "uid", &trace.opts.target.uid_str, "user",
+ "user to profile"),
+ OPT_END()
+ };
+ int err;
+
+ argc = parse_options(argc, argv, trace_options, trace_usage, 0);
+ if (argc)
+ usage_with_options(trace_usage, trace_options);
+
+ err = perf_target__parse_uid(&trace.opts.target);
+ if (err) {
+ char bf[BUFSIZ];
+ perf_target__strerror(&trace.opts.target, err, bf, sizeof(bf));
+ printf("%s", bf);
+ return err;
+ }
+
+ return trace__run(&trace);
+}
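With the options defined above, a session with the new tool looks like (hypothetical pids/values):

	perf trace --pid 1234		# strace-like tracing of an existing task
	perf trace --all-cpus		# system-wide syscall tracing
	perf trace --mmap-pages 2048	# larger ring buffer for busy workloads

Each sys_enter is printed as name(arg list) and completed by the matching sys_exit, with errno decoding via libaudit for the syscalls flagged errmsg in syscall_fmts[].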
diff --git a/tools/perf/builtin.h b/tools/perf/builtin.h
index b382bd551aa..08143bd854c 100644
--- a/tools/perf/builtin.h
+++ b/tools/perf/builtin.h
@@ -34,6 +34,8 @@ extern int cmd_kmem(int argc, const char **argv, const char *prefix);
extern int cmd_lock(int argc, const char **argv, const char *prefix);
extern int cmd_kvm(int argc, const char **argv, const char *prefix);
extern int cmd_test(int argc, const char **argv, const char *prefix);
+extern int cmd_trace(int argc, const char **argv, const char *prefix);
extern int cmd_inject(int argc, const char **argv, const char *prefix);
+extern int find_scripts(char **scripts_array, char **scripts_path_array);
#endif
diff --git a/tools/perf/command-list.txt b/tools/perf/command-list.txt
index d695fe40fbf..3e86bbd8c2d 100644
--- a/tools/perf/command-list.txt
+++ b/tools/perf/command-list.txt
@@ -17,8 +17,9 @@ perf-report mainporcelain common
perf-stat mainporcelain common
perf-timechart mainporcelain common
perf-top mainporcelain common
+perf-trace mainporcelain common
perf-script mainporcelain common
-perf-probe mainporcelain common
+perf-probe mainporcelain full
perf-kmem mainporcelain common
perf-lock mainporcelain common
perf-kvm mainporcelain common
diff --git a/tools/perf/config/feature-tests.mak b/tools/perf/config/feature-tests.mak
index 6c18785a641..4add41bb0c7 100644
--- a/tools/perf/config/feature-tests.mak
+++ b/tools/perf/config/feature-tests.mak
@@ -154,3 +154,53 @@ int main(void)
return 0;
}
endef
+
+ifndef NO_LIBUNWIND
+define SOURCE_LIBUNWIND
+#include <libunwind.h>
+#include <stdlib.h>
+
+extern int UNW_OBJ(dwarf_search_unwind_table) (unw_addr_space_t as,
+ unw_word_t ip,
+ unw_dyn_info_t *di,
+ unw_proc_info_t *pi,
+ int need_unwind_info, void *arg);
+
+
+#define dwarf_search_unwind_table UNW_OBJ(dwarf_search_unwind_table)
+
+int main(void)
+{
+ unw_addr_space_t addr_space;
+ addr_space = unw_create_addr_space(NULL, 0);
+ unw_init_remote(NULL, addr_space, NULL);
+ dwarf_search_unwind_table(addr_space, 0, NULL, NULL, 0, NULL);
+ return 0;
+}
+endef
+endif
+
+ifndef NO_BACKTRACE
+define SOURCE_BACKTRACE
+#include <execinfo.h>
+#include <stdio.h>
+
+int main(void)
+{
+ backtrace(NULL, 0);
+ backtrace_symbols(NULL, 0);
+ return 0;
+}
+endef
+endif
+
+ifndef NO_LIBAUDIT
+define SOURCE_LIBAUDIT
+#include <libaudit.h>
+
+int main(void)
+{
+ return audit_open();
+}
+endef
+endif \ No newline at end of file
diff --git a/tools/perf/perf-archive.sh b/tools/perf/perf-archive.sh
index 95b6f8b6177..e9193062026 100644
--- a/tools/perf/perf-archive.sh
+++ b/tools/perf/perf-archive.sh
@@ -24,7 +24,7 @@ NOBUILDID=0000000000000000000000000000000000000000
perf buildid-list -i $PERF_DATA --with-hits | grep -v "^$NOBUILDID " > $BUILDIDS
if [ ! -s $BUILDIDS ] ; then
echo "perf archive: no build-ids found"
- rm -f $BUILDIDS
+ rm $BUILDIDS || true
exit 1
fi
@@ -39,8 +39,8 @@ while read build_id ; do
echo ${filename#$PERF_BUILDID_LINKDIR} >> $MANIFEST
done
-tar cfj $PERF_DATA.tar.bz2 -C $PERF_BUILDID_DIR -T $MANIFEST
-rm -f $MANIFEST $BUILDIDS
+tar cjf $PERF_DATA.tar.bz2 -C $PERF_BUILDID_DIR -T $MANIFEST
+rm $MANIFEST $BUILDIDS || true
echo -e "Now please run:\n"
echo -e "$ tar xvf $PERF_DATA.tar.bz2 -C ~/.debug\n"
echo "wherever you need to run 'perf report' on."
diff --git a/tools/perf/perf.c b/tools/perf/perf.c
index 2b2e225a4d4..fc2f770e302 100644
--- a/tools/perf/perf.c
+++ b/tools/perf/perf.c
@@ -14,6 +14,7 @@
#include "util/run-command.h"
#include "util/parse-events.h"
#include "util/debugfs.h"
+#include <pthread.h>
const char perf_usage_string[] =
"perf [--version] [--help] COMMAND [ARGS]";
@@ -24,6 +25,42 @@ const char perf_more_info_string[] =
int use_browser = -1;
static int use_pager = -1;
+struct cmd_struct {
+ const char *cmd;
+ int (*fn)(int, const char **, const char *);
+ int option;
+};
+
+static struct cmd_struct commands[] = {
+ { "buildid-cache", cmd_buildid_cache, 0 },
+ { "buildid-list", cmd_buildid_list, 0 },
+ { "diff", cmd_diff, 0 },
+ { "evlist", cmd_evlist, 0 },
+ { "help", cmd_help, 0 },
+ { "list", cmd_list, 0 },
+ { "record", cmd_record, 0 },
+ { "report", cmd_report, 0 },
+ { "bench", cmd_bench, 0 },
+ { "stat", cmd_stat, 0 },
+ { "timechart", cmd_timechart, 0 },
+ { "top", cmd_top, 0 },
+ { "annotate", cmd_annotate, 0 },
+ { "version", cmd_version, 0 },
+ { "script", cmd_script, 0 },
+ { "sched", cmd_sched, 0 },
+#ifndef NO_LIBELF_SUPPORT
+ { "probe", cmd_probe, 0 },
+#endif
+ { "kmem", cmd_kmem, 0 },
+ { "lock", cmd_lock, 0 },
+ { "kvm", cmd_kvm, 0 },
+ { "test", cmd_test, 0 },
+#ifndef NO_LIBAUDIT_SUPPORT
+ { "trace", cmd_trace, 0 },
+#endif
+ { "inject", cmd_inject, 0 },
+};
+
struct pager_config {
const char *cmd;
int val;
@@ -160,6 +197,14 @@ static int handle_options(const char ***argv, int *argc, int *envchanged)
fprintf(stderr, "dir: %s\n", debugfs_mountpoint);
if (envchanged)
*envchanged = 1;
+ } else if (!strcmp(cmd, "--list-cmds")) {
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(commands); i++) {
+ struct cmd_struct *p = commands+i;
+ printf("%s ", p->cmd);
+ }
+ exit(0);
} else {
fprintf(stderr, "Unknown option: %s\n", cmd);
usage(perf_usage_string);
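Given the commands[] table hoisted above, the new --list-cmds option simply prints the space-separated command names, e.g.:

	$ perf --list-cmds
	buildid-cache buildid-list diff evlist help list record report bench ...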
@@ -245,12 +290,6 @@ const char perf_version_string[] = PERF_VERSION;
*/
#define NEED_WORK_TREE (1<<2)
-struct cmd_struct {
- const char *cmd;
- int (*fn)(int, const char **, const char *);
- int option;
-};
-
static int run_builtin(struct cmd_struct *p, int argc, const char **argv)
{
int status;
@@ -296,30 +335,6 @@ static int run_builtin(struct cmd_struct *p, int argc, const char **argv)
static void handle_internal_command(int argc, const char **argv)
{
const char *cmd = argv[0];
- static struct cmd_struct commands[] = {
- { "buildid-cache", cmd_buildid_cache, 0 },
- { "buildid-list", cmd_buildid_list, 0 },
- { "diff", cmd_diff, 0 },
- { "evlist", cmd_evlist, 0 },
- { "help", cmd_help, 0 },
- { "list", cmd_list, 0 },
- { "record", cmd_record, 0 },
- { "report", cmd_report, 0 },
- { "bench", cmd_bench, 0 },
- { "stat", cmd_stat, 0 },
- { "timechart", cmd_timechart, 0 },
- { "top", cmd_top, 0 },
- { "annotate", cmd_annotate, 0 },
- { "version", cmd_version, 0 },
- { "script", cmd_script, 0 },
- { "sched", cmd_sched, 0 },
- { "probe", cmd_probe, 0 },
- { "kmem", cmd_kmem, 0 },
- { "lock", cmd_lock, 0 },
- { "kvm", cmd_kvm, 0 },
- { "test", cmd_test, 0 },
- { "inject", cmd_inject, 0 },
- };
unsigned int i;
static const char ext[] = STRIP_EXTENSION;
diff --git a/tools/perf/perf.h b/tools/perf/perf.h
index f960ccb2edc..87f4ec6d1f3 100644
--- a/tools/perf/perf.h
+++ b/tools/perf/perf.h
@@ -209,9 +209,15 @@ void pthread__unblock_sigwinch(void);
#include "util/target.h"
+enum perf_call_graph_mode {
+ CALLCHAIN_NONE,
+ CALLCHAIN_FP,
+ CALLCHAIN_DWARF
+};
+
struct perf_record_opts {
struct perf_target target;
- bool call_graph;
+ int call_graph;
bool group;
bool inherit_stat;
bool no_delay;
@@ -230,6 +236,7 @@ struct perf_record_opts {
u64 branch_stack;
u64 default_interval;
u64 user_interval;
+ u16 stack_dump_size;
};
#endif
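A consumer sketch for the reworked struct (hypothetical values): call_graph now selects a mode instead of acting as a bool, and stack_dump_size sizes the user-stack copy used for DWARF unwinding:

	struct perf_record_opts opts = {
		.call_graph	 = CALLCHAIN_DWARF,
		.stack_dump_size = 8192,	/* bytes of user stack to sample */
	};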
diff --git a/tools/perf/scripts/python/Perf-Trace-Util/lib/Perf/Trace/EventClass.py b/tools/perf/scripts/python/Perf-Trace-Util/lib/Perf/Trace/EventClass.py
new file mode 100755
index 00000000000..9e0985794e2
--- /dev/null
+++ b/tools/perf/scripts/python/Perf-Trace-Util/lib/Perf/Trace/EventClass.py
@@ -0,0 +1,94 @@
+# EventClass.py
+#
+# This is a library defining some event type classes, which can be
+# used by other scripts to analyze perf samples.
+#
+# Currently just a few classes are defined as examples: PerfEvent is
+# the base class for all perf event samples, PebsEvent is a hardware-based
+# Intel x86 PEBS event, and users can add more SW/HW event classes as
+# needed.
+
+import struct
+
+# Event types, user could add more here
+EVTYPE_GENERIC = 0
+EVTYPE_PEBS = 1 # Basic PEBS event
+EVTYPE_PEBS_LL = 2 # PEBS event with load latency info
+EVTYPE_IBS = 3
+
+#
+# Currently we don't have a good way to tell the event type other than
+# by the size of the raw buffer: a raw PEBS event with load latency data
+# is 176 bytes, while a pure PEBS event is 144 bytes.
+#
+def create_event(name, comm, dso, symbol, raw_buf):
+ if (len(raw_buf) == 144):
+ event = PebsEvent(name, comm, dso, symbol, raw_buf)
+ elif (len(raw_buf) == 176):
+ event = PebsNHM(name, comm, dso, symbol, raw_buf)
+ else:
+ event = PerfEvent(name, comm, dso, symbol, raw_buf)
+
+ return event
+
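+# Usage sketch (hypothetical values): create_event() dispatches on the
+# raw buffer size, so callers never pick a class directly:
+#   ev = create_event("cycles", "bash", "/bin/bash", "main", raw_buf)
+#   ev.show()
+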
+class PerfEvent(object):
+ event_num = 0
+ def __init__(self, name, comm, dso, symbol, raw_buf, ev_type=EVTYPE_GENERIC):
+ self.name = name
+ self.comm = comm
+ self.dso = dso
+ self.symbol = symbol
+ self.raw_buf = raw_buf
+ self.ev_type = ev_type
+ PerfEvent.event_num += 1
+
+ def show(self):
+ print "PMU event: name=%12s, symbol=%24s, comm=%8s, dso=%12s" % (self.name, self.symbol, self.comm, self.dso)
+
+#
+# Basic Intel PEBS (Precise Event-based Sampling) event, whose raw buffer
+# contains the context info when that event happened: the EFLAGS and
+# linear IP info, as well as all the registers.
+#
+class PebsEvent(PerfEvent):
+ pebs_num = 0
+ def __init__(self, name, comm, dso, symbol, raw_buf, ev_type=EVTYPE_PEBS):
+ tmp_buf=raw_buf[0:80]
+ flags, ip, ax, bx, cx, dx, si, di, bp, sp = struct.unpack('QQQQQQQQQQ', tmp_buf)
+ self.flags = flags
+ self.ip = ip
+ self.ax = ax
+ self.bx = bx
+ self.cx = cx
+ self.dx = dx
+ self.si = si
+ self.di = di
+ self.bp = bp
+ self.sp = sp
+
+ PerfEvent.__init__(self, name, comm, dso, symbol, raw_buf, ev_type)
+ PebsEvent.pebs_num += 1
+ del tmp_buf
+
+#
+# Intel Nehalem and Westmere support PEBS plus Load Latency info, which
+# lies in the four 64-bit words written after the PEBS data:
+# Status: records the IA32_PERF_GLOBAL_STATUS register value
+# DLA: Data Linear Address (EIP)
+# DSE: Data Source Encoding, where the latency happens, hit or miss
+# in L1/L2/L3 or IO operations
+# LAT: the actual latency in cycles
+#
+class PebsNHM(PebsEvent):
+ pebs_nhm_num = 0
+ def __init__(self, name, comm, dso, symbol, raw_buf, ev_type=EVTYPE_PEBS_LL):
+ tmp_buf=raw_buf[144:176]
+ status, dla, dse, lat = struct.unpack('QQQQ', tmp_buf)
+ self.status = status
+ self.dla = dla
+ self.dse = dse
+ self.lat = lat
+
+ PebsEvent.__init__(self, name, comm, dso, symbol, raw_buf, ev_type)
+ PebsNHM.pebs_nhm_num += 1
+ del tmp_buf
diff --git a/tools/perf/scripts/python/bin/event_analyzing_sample-record b/tools/perf/scripts/python/bin/event_analyzing_sample-record
new file mode 100644
index 00000000000..5ce652dabd0
--- /dev/null
+++ b/tools/perf/scripts/python/bin/event_analyzing_sample-record
@@ -0,0 +1,8 @@
+#!/bin/bash
+
+#
+# event_analyzing_sample.py can cover all types of perf samples
+# including tracepoints, so there are no special record requirements;
+# just record what you want to analyze.
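+#
+# Typically invoked via "perf script record event_analyzing_sample",
+# which passes the remaining options through to perf record.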
+#
+perf record "$@"
diff --git a/tools/perf/scripts/python/bin/event_analyzing_sample-report b/tools/perf/scripts/python/bin/event_analyzing_sample-report
new file mode 100644
index 00000000000..0941fc94e15
--- /dev/null
+++ b/tools/perf/scripts/python/bin/event_analyzing_sample-report
@@ -0,0 +1,3 @@
+#!/bin/bash
+# description: analyze all perf samples
+perf script "$@" -s "$PERF_EXEC_PATH"/scripts/python/event_analyzing_sample.py
diff --git a/tools/perf/scripts/python/event_analyzing_sample.py b/tools/perf/scripts/python/event_analyzing_sample.py
new file mode 100644
index 00000000000..163c39fa12d
--- /dev/null
+++ b/tools/perf/scripts/python/event_analyzing_sample.py
@@ -0,0 +1,189 @@
+# event_analyzing_sample.py: general event handler in python
+#
+# The current perf report is already very powerful with annotation
+# integrated, and this script is not trying to be as powerful as perf
+# report; rather, it provides end users/developers a flexible way to
+# analyze events other than tracepoints.
+#
+# The 2 database related functions in this script just show how to
+# gather the basic information, and users can modify and write their
+# own functions according to their specific requirements.
+#
+# The first function "show_general_events" just does a basic grouping
+# for all generic events with the help of sqlite, and the second one,
+# "show_pebs_ll", is for an x86 HW PMU event: PEBS with load latency
+# data.
+#
+
+import os
+import sys
+import math
+import struct
+import sqlite3
+
+sys.path.append(os.environ['PERF_EXEC_PATH'] + \
+ '/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
+
+from perf_trace_context import *
+from EventClass import *
+
+#
+# If perf.data has a large number of samples, the insert operations
+# will be very time consuming (about 10+ minutes for 10000 samples) if
+# the .db database is on disk. Moving the .db file to a RAM-based FS
+# speeds up the handling, cutting the time down to several seconds.
+#
+con = sqlite3.connect("/dev/shm/perf.db")
+con.isolation_level = None
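+
+# The tables can also be inspected offline with the sqlite3 CLI, e.g.:
+#   sqlite3 /dev/shm/perf.db "select comm, count(*) from gen_events group by comm"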
+
+def trace_begin():
+ print "In trace_begin:\n"
+
+ #
+ # Create several tables at the start: pebs_ll is for PEBS data with
+ # load latency info, while gen_events is for general events.
+ #
+ con.execute("""
+ create table if not exists gen_events (
+ name text,
+ symbol text,
+ comm text,
+ dso text
+ );""")
+ con.execute("""
+ create table if not exists pebs_ll (
+ name text,
+ symbol text,
+ comm text,
+ dso text,
+ flags integer,
+ ip integer,
+ status integer,
+ dse integer,
+ dla integer,
+ lat integer
+ );""")
+
+#
+# Create an event object and insert it into the database so that users
+# can do more analysis with simple database commands.
+#
+def process_event(param_dict):
+ event_attr = param_dict["attr"]
+ sample = param_dict["sample"]
+ raw_buf = param_dict["raw_buf"]
+ comm = param_dict["comm"]
+ name = param_dict["ev_name"]
+
+ # Symbol and dso info are not always resolved
+ if (param_dict.has_key("dso")):
+ dso = param_dict["dso"]
+ else:
+ dso = "Unknown_dso"
+
+ if (param_dict.has_key("symbol")):
+ symbol = param_dict["symbol"]
+ else:
+ symbol = "Unknown_symbol"
+
+ # Create the event object and insert it into the right table in the database
+ event = create_event(name, comm, dso, symbol, raw_buf)
+ insert_db(event)
+
+def insert_db(event):
+ if event.ev_type == EVTYPE_GENERIC:
+ con.execute("insert into gen_events values(?, ?, ?, ?)",
+ (event.name, event.symbol, event.comm, event.dso))
+ elif event.ev_type == EVTYPE_PEBS_LL:
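+ # sqlite stores integers as signed 64-bit values, so clear the sign
+ # bit to keep kernel addresses from overflowing the integer columns.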
+ event.ip &= 0x7fffffffffffffff
+ event.dla &= 0x7fffffffffffffff
+ con.execute("insert into pebs_ll values (?, ?, ?, ?, ?, ?, ?, ?, ?, ?)",
+ (event.name, event.symbol, event.comm, event.dso, event.flags,
+ event.ip, event.status, event.dse, event.dla, event.lat))
+
+def trace_end():
+ print "In trace_end:\n"
+ # We show the basic info for the 2 types of event classes
+ show_general_events()
+ show_pebs_ll()
+ con.close()
+
+#
+# As the number of events may be very big, we can't show the histogram
+# counts on a linear scale, so a log2 scale is used instead.
+#
+
+def num2sym(num):
+ # Each number will have at least one '#'
+ snum = '#' * int(math.log(num, 2) + 1)
+ return snum
+
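+# For example, num2sym(1) yields "#" and num2sym(8) yields "####",
+# since int(log2(8)) + 1 == 4.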
+def show_general_events():
+
+ # Check the total record number in the table
+ count = con.execute("select count(*) from gen_events")
+ for t in count:
+ print "There is %d records in gen_events table" % t[0]
+ if t[0] == 0:
+ return
+
+ print "Statistics about the general events grouped by thread/symbol/dso: \n"
+
+ # Group by thread
+ commq = con.execute("select comm, count(comm) from gen_events group by comm order by -count(comm)")
+ print "\n%16s %8s %16s\n%s" % ("comm", "number", "histogram", "="*42)
+ for row in commq:
+ print "%16s %8d %s" % (row[0], row[1], num2sym(row[1]))
+
+ # Group by symbol
+ print "\n%32s %8s %16s\n%s" % ("symbol", "number", "histogram", "="*58)
+ symbolq = con.execute("select symbol, count(symbol) from gen_events group by symbol order by -count(symbol)")
+ for row in symbolq:
+ print "%32s %8d %s" % (row[0], row[1], num2sym(row[1]))
+
+ # Group by dso
+ print "\n%40s %8s %16s\n%s" % ("dso", "number", "histogram", "="*74)
+ dsoq = con.execute("select dso, count(dso) from gen_events group by dso order by -count(dso)")
+ for row in dsoq:
+ print "%40s %8d %s" % (row[0], row[1], num2sym(row[1]))
+
+#
+# This function just shows the basic info, and we could do more with
+# the data in the tables, like checking the function parameters when
+# big latency events happen.
+#
+def show_pebs_ll():
+
+ count = con.execute("select count(*) from pebs_ll")
+ for t in count:
+ print "There is %d records in pebs_ll table" % t[0]
+ if t[0] == 0:
+ return
+
+ print "Statistics about the PEBS Load Latency events grouped by thread/symbol/dse/latency: \n"
+
+ # Group by thread
+ commq = con.execute("select comm, count(comm) from pebs_ll group by comm order by -count(comm)")
+ print "\n%16s %8s %16s\n%s" % ("comm", "number", "histogram", "="*42)
+ for row in commq:
+ print "%16s %8d %s" % (row[0], row[1], num2sym(row[1]))
+
+ # Group by symbol
+ print "\n%32s %8s %16s\n%s" % ("symbol", "number", "histogram", "="*58)
+ symbolq = con.execute("select symbol, count(symbol) from pebs_ll group by symbol order by -count(symbol)")
+ for row in symbolq:
+ print "%32s %8d %s" % (row[0], row[1], num2sym(row[1]))
+
+ # Group by dse
+ dseq = con.execute("select dse, count(dse) from pebs_ll group by dse order by -count(dse)")
+ print "\n%32s %8s %16s\n%s" % ("dse", "number", "histogram", "="*58)
+ for row in dseq:
+ print "%32s %8d %s" % (row[0], row[1], num2sym(row[1]))
+
+ # Group by latency
+ latq = con.execute("select lat, count(lat) from pebs_ll group by lat order by lat")
+ print "\n%32s %8s %16s\n%s" % ("latency", "number", "histogram", "="*58)
+ for row in latq:
+ print "%32s %8d %s" % (row[0], row[1], num2sym(row[1]))
+
+def trace_unhandled(event_name, context, event_fields_dict):
+ print ' '.join(['%s=%s' % (k, str(v)) for k, v in sorted(event_fields_dict.items())])
diff --git a/tools/perf/ui/browser.c b/tools/perf/ui/browser.c
index 1818a531f1d..4aeb7d5df93 100644
--- a/tools/perf/ui/browser.c
+++ b/tools/perf/ui/browser.c
@@ -269,7 +269,7 @@ int ui_browser__show(struct ui_browser *browser, const char *title,
return err ? 0 : -1;
}
-void ui_browser__hide(struct ui_browser *browser __used)
+void ui_browser__hide(struct ui_browser *browser __maybe_unused)
{
pthread_mutex_lock(&ui__lock);
ui_helpline__pop();
@@ -518,7 +518,7 @@ static struct ui_browser__colorset {
static int ui_browser__color_config(const char *var, const char *value,
- void *data __used)
+ void *data __maybe_unused)
{
char *fg = NULL, *bg;
int i;
@@ -602,7 +602,8 @@ void __ui_browser__vline(struct ui_browser *browser, unsigned int column,
SLsmg_set_char_set(0);
}
-void ui_browser__write_graph(struct ui_browser *browser __used, int graph)
+void ui_browser__write_graph(struct ui_browser *browser __maybe_unused,
+ int graph)
{
SLsmg_set_char_set(1);
SLsmg_write_char(graph);
diff --git a/tools/perf/ui/browsers/annotate.c b/tools/perf/ui/browsers/annotate.c
index 67a2703e666..8f8cd2d73b3 100644
--- a/tools/perf/ui/browsers/annotate.c
+++ b/tools/perf/ui/browsers/annotate.c
@@ -54,7 +54,8 @@ static inline struct browser_disasm_line *disasm_line__browser(struct disasm_lin
return (struct browser_disasm_line *)(dl + 1);
}
-static bool disasm_line__filter(struct ui_browser *browser __used, void *entry)
+static bool disasm_line__filter(struct ui_browser *browser __maybe_unused,
+ void *entry)
{
if (annotate_browser__opts.hide_src_code) {
struct disasm_line *dl = list_entry(entry, struct disasm_line, node);
@@ -928,7 +929,8 @@ static int annotate_config__cmp(const void *name, const void *cfgp)
return strcmp(name, cfg->name);
}
-static int annotate__config(const char *var, const char *value, void *data __used)
+static int annotate__config(const char *var, const char *value,
+ void *data __maybe_unused)
{
struct annotate__config *cfg;
const char *name;
diff --git a/tools/perf/ui/browsers/hists.c b/tools/perf/ui/browsers/hists.c
index 413bd62eedb..a21f40bebba 100644
--- a/tools/perf/ui/browsers/hists.c
+++ b/tools/perf/ui/browsers/hists.c
@@ -24,9 +24,12 @@ struct hist_browser {
struct hist_entry *he_selection;
struct map_symbol *selection;
int print_seq;
+ bool show_dso;
bool has_symbols;
};
+extern void hist_browser__init_hpp(void);
+
static int hists__browser_title(struct hists *hists, char *bf, size_t size,
const char *ev_name);
@@ -376,12 +379,19 @@ out:
}
static char *callchain_list__sym_name(struct callchain_list *cl,
- char *bf, size_t bfsize)
+ char *bf, size_t bfsize, bool show_dso)
{
+ int printed;
+
if (cl->ms.sym)
- return cl->ms.sym->name;
+ printed = scnprintf(bf, bfsize, "%s", cl->ms.sym->name);
+ else
+ printed = scnprintf(bf, bfsize, "%#" PRIx64, cl->ip);
+
+ if (show_dso)
+ scnprintf(bf + printed, bfsize - printed, " %s",
+ cl->ms.map ? cl->ms.map->dso->short_name : "unknown");
- snprintf(bf, bfsize, "%#" PRIx64, cl->ip);
return bf;
}
@@ -417,7 +427,7 @@ static int hist_browser__show_callchain_node_rb_tree(struct hist_browser *browse
remaining -= cumul;
list_for_each_entry(chain, &child->val, list) {
- char ipstr[BITS_PER_LONG / 4 + 1], *alloc_str;
+ char bf[1024], *alloc_str;
const char *str;
int color;
bool was_first = first;
@@ -434,7 +444,8 @@ static int hist_browser__show_callchain_node_rb_tree(struct hist_browser *browse
}
alloc_str = NULL;
- str = callchain_list__sym_name(chain, ipstr, sizeof(ipstr));
+ str = callchain_list__sym_name(chain, bf, sizeof(bf),
+ browser->show_dso);
if (was_first) {
double percent = cumul * 100.0 / new_total;
@@ -493,7 +504,7 @@ static int hist_browser__show_callchain_node(struct hist_browser *browser,
char folded_sign = ' ';
list_for_each_entry(chain, &node->val, list) {
- char ipstr[BITS_PER_LONG / 4 + 1], *s;
+ char bf[1024], *s;
int color;
folded_sign = callchain_list__folded(chain);
@@ -510,7 +521,8 @@ static int hist_browser__show_callchain_node(struct hist_browser *browser,
*is_current_entry = true;
}
- s = callchain_list__sym_name(chain, ipstr, sizeof(ipstr));
+ s = callchain_list__sym_name(chain, bf, sizeof(bf),
+ browser->show_dso);
ui_browser__gotorc(&browser->b, row, 0);
ui_browser__set_color(&browser->b, color);
slsmg_write_nstring(" ", offset);
@@ -553,14 +565,47 @@ static int hist_browser__show_callchain(struct hist_browser *browser,
return row - first_row;
}
+#define HPP__COLOR_FN(_name, _field) \
+static int hist_browser__hpp_color_ ## _name(struct perf_hpp *hpp, \
+ struct hist_entry *he) \
+{ \
+ double percent = 100.0 * he->_field / hpp->total_period; \
+ *(double *)hpp->ptr = percent; \
+ return scnprintf(hpp->buf, hpp->size, "%6.2f%%", percent); \
+}
+
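+/*
+ * Each HPP__COLOR_FN(name, field) expansion defines
+ * hist_browser__hpp_color_<name>(), which formats the share of
+ * hpp->total_period accounted for by he-><field>.
+ */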
+HPP__COLOR_FN(overhead, period)
+HPP__COLOR_FN(overhead_sys, period_sys)
+HPP__COLOR_FN(overhead_us, period_us)
+HPP__COLOR_FN(overhead_guest_sys, period_guest_sys)
+HPP__COLOR_FN(overhead_guest_us, period_guest_us)
+
+#undef HPP__COLOR_FN
+
+void hist_browser__init_hpp(void)
+{
+ perf_hpp__init(false, false);
+
+ perf_hpp__format[PERF_HPP__OVERHEAD].color =
+ hist_browser__hpp_color_overhead;
+ perf_hpp__format[PERF_HPP__OVERHEAD_SYS].color =
+ hist_browser__hpp_color_overhead_sys;
+ perf_hpp__format[PERF_HPP__OVERHEAD_US].color =
+ hist_browser__hpp_color_overhead_us;
+ perf_hpp__format[PERF_HPP__OVERHEAD_GUEST_SYS].color =
+ hist_browser__hpp_color_overhead_guest_sys;
+ perf_hpp__format[PERF_HPP__OVERHEAD_GUEST_US].color =
+ hist_browser__hpp_color_overhead_guest_us;
+}
+
static int hist_browser__show_entry(struct hist_browser *browser,
struct hist_entry *entry,
unsigned short row)
{
char s[256];
double percent;
- int printed = 0;
- int width = browser->b.width - 6; /* The percentage */
+ int i, printed = 0;
+ int width = browser->b.width;
char folded_sign = ' ';
bool current_entry = ui_browser__is_current_entry(&browser->b, row);
off_t row_offset = entry->row_offset;
@@ -576,35 +621,50 @@ static int hist_browser__show_entry(struct hist_browser *browser,
}
if (row_offset == 0) {
- hist_entry__snprintf(entry, s, sizeof(s), browser->hists);
- percent = (entry->period * 100.0) / browser->hists->stats.total_period;
+ struct perf_hpp hpp = {
+ .buf = s,
+ .size = sizeof(s),
+ .total_period = browser->hists->stats.total_period,
+ };
- ui_browser__set_percent_color(&browser->b, percent, current_entry);
ui_browser__gotorc(&browser->b, row, 0);
- if (symbol_conf.use_callchain) {
- slsmg_printf("%c ", folded_sign);
- width -= 2;
- }
- slsmg_printf(" %5.2f%%", percent);
+ for (i = 0; i < PERF_HPP__MAX_INDEX; i++) {
+ if (!perf_hpp__format[i].cond)
+ continue;
- /* The scroll bar isn't being used */
- if (!browser->b.navkeypressed)
- width += 1;
+ if (i) {
+ slsmg_printf(" ");
+ width -= 2;
+ }
- if (!current_entry || !browser->b.navkeypressed)
- ui_browser__set_color(&browser->b, HE_COLORSET_NORMAL);
+ if (perf_hpp__format[i].color) {
+ hpp.ptr = &percent;
+ /* It will set percent for us. See HPP__COLOR_FN above. */
+ width -= perf_hpp__format[i].color(&hpp, entry);
- if (symbol_conf.show_nr_samples) {
- slsmg_printf(" %11u", entry->nr_events);
- width -= 12;
- }
+ ui_browser__set_percent_color(&browser->b, percent, current_entry);
+
+ if (i == 0 && symbol_conf.use_callchain) {
+ slsmg_printf("%c ", folded_sign);
+ width -= 2;
+ }
+
+ slsmg_printf("%s", s);
- if (symbol_conf.show_total_period) {
- slsmg_printf(" %12" PRIu64, entry->period);
- width -= 13;
+ if (!current_entry || !browser->b.navkeypressed)
+ ui_browser__set_color(&browser->b, HE_COLORSET_NORMAL);
+ } else {
+ width -= perf_hpp__format[i].entry(&hpp, entry);
+ slsmg_printf("%s", s);
+ }
}
+ /* The scroll bar isn't being used */
+ if (!browser->b.navkeypressed)
+ width += 1;
+
+ hist_entry__sort_snprintf(entry, s, sizeof(s), browser->hists);
slsmg_write_nstring(s, width);
++row;
++printed;
@@ -830,7 +890,7 @@ static int hist_browser__fprintf_callchain_node_rb_tree(struct hist_browser *bro
remaining -= cumul;
list_for_each_entry(chain, &child->val, list) {
- char ipstr[BITS_PER_LONG / 4 + 1], *alloc_str;
+ char bf[1024], *alloc_str;
const char *str;
bool was_first = first;
@@ -842,7 +902,8 @@ static int hist_browser__fprintf_callchain_node_rb_tree(struct hist_browser *bro
folded_sign = callchain_list__folded(chain);
alloc_str = NULL;
- str = callchain_list__sym_name(chain, ipstr, sizeof(ipstr));
+ str = callchain_list__sym_name(chain, bf, sizeof(bf),
+ browser->show_dso);
if (was_first) {
double percent = cumul * 100.0 / new_total;
@@ -880,10 +941,10 @@ static int hist_browser__fprintf_callchain_node(struct hist_browser *browser,
int printed = 0;
list_for_each_entry(chain, &node->val, list) {
- char ipstr[BITS_PER_LONG / 4 + 1], *s;
+ char bf[1024], *s;
folded_sign = callchain_list__folded(chain);
- s = callchain_list__sym_name(chain, ipstr, sizeof(ipstr));
+ s = callchain_list__sym_name(chain, bf, sizeof(bf), browser->show_dso);
printed += fprintf(fp, "%*s%c %s\n", offset, " ", folded_sign, s);
}
@@ -920,7 +981,7 @@ static int hist_browser__fprintf_entry(struct hist_browser *browser,
if (symbol_conf.use_callchain)
folded_sign = hist_entry__folded(he);
- hist_entry__snprintf(he, s, sizeof(s), browser->hists);
+ hist_entry__sort_snprintf(he, s, sizeof(s), browser->hists);
percent = (he->period * 100.0) / browser->hists->stats.total_period;
if (symbol_conf.use_callchain)
@@ -1133,6 +1194,9 @@ static int perf_evsel__hists_browse(struct perf_evsel *evsel, int nr_events,
continue;
case 'd':
goto zoom_dso;
+ case 'V':
+ browser->show_dso = !browser->show_dso;
+ continue;
case 't':
goto zoom_thread;
case '/':
@@ -1164,6 +1228,7 @@ static int perf_evsel__hists_browse(struct perf_evsel *evsel, int nr_events,
"d Zoom into current DSO\n"
"t Zoom into current Thread\n"
"P Print histograms to perf.hist.N\n"
+ "V Verbose (DSO names in callchains, etc)\n"
"/ Filter symbol by name");
continue;
case K_ENTER:
diff --git a/tools/perf/ui/gtk/browser.c b/tools/perf/ui/gtk/browser.c
index ec12e0b4ded..7ff99ec1d95 100644
--- a/tools/perf/ui/gtk/browser.c
+++ b/tools/perf/ui/gtk/browser.c
@@ -3,6 +3,7 @@
#include "../evsel.h"
#include "../sort.h"
#include "../hist.h"
+#include "../helpline.h"
#include "gtk.h"
#include <signal.h>
@@ -35,6 +36,57 @@ static void perf_gtk__resize_window(GtkWidget *window)
gtk_window_resize(GTK_WINDOW(window), width, height);
}
+static const char *perf_gtk__get_percent_color(double percent)
+{
+ if (percent >= MIN_RED)
+ return "<span fgcolor='red'>";
+ if (percent >= MIN_GREEN)
+ return "<span fgcolor='dark green'>";
+ return NULL;
+}
+
+#define HPP__COLOR_FN(_name, _field) \
+static int perf_gtk__hpp_color_ ## _name(struct perf_hpp *hpp, \
+ struct hist_entry *he) \
+{ \
+ double percent = 100.0 * he->_field / hpp->total_period; \
+ const char *markup; \
+ int ret = 0; \
+ \
+ markup = perf_gtk__get_percent_color(percent); \
+ if (markup) \
+ ret += scnprintf(hpp->buf, hpp->size, "%s", markup); \
+ ret += scnprintf(hpp->buf + ret, hpp->size - ret, "%6.2f%%", percent); \
+ if (markup) \
+ ret += scnprintf(hpp->buf + ret, hpp->size - ret, "</span>"); \
+ \
+ return ret; \
+}
+
+HPP__COLOR_FN(overhead, period)
+HPP__COLOR_FN(overhead_sys, period_sys)
+HPP__COLOR_FN(overhead_us, period_us)
+HPP__COLOR_FN(overhead_guest_sys, period_guest_sys)
+HPP__COLOR_FN(overhead_guest_us, period_guest_us)
+
+#undef HPP__COLOR_FN
+
+void perf_gtk__init_hpp(void)
+{
+ perf_hpp__init(false, false);
+
+ perf_hpp__format[PERF_HPP__OVERHEAD].color =
+ perf_gtk__hpp_color_overhead;
+ perf_hpp__format[PERF_HPP__OVERHEAD_SYS].color =
+ perf_gtk__hpp_color_overhead_sys;
+ perf_hpp__format[PERF_HPP__OVERHEAD_US].color =
+ perf_gtk__hpp_color_overhead_us;
+ perf_hpp__format[PERF_HPP__OVERHEAD_GUEST_SYS].color =
+ perf_gtk__hpp_color_overhead_guest_sys;
+ perf_hpp__format[PERF_HPP__OVERHEAD_GUEST_US].color =
+ perf_gtk__hpp_color_overhead_guest_us;
+}
+
static void perf_gtk__show_hists(GtkWidget *window, struct hists *hists)
{
GType col_types[MAX_COLUMNS];
@@ -42,15 +94,25 @@ static void perf_gtk__show_hists(GtkWidget *window, struct hists *hists)
struct sort_entry *se;
GtkListStore *store;
struct rb_node *nd;
- u64 total_period;
GtkWidget *view;
- int col_idx;
+ int i, col_idx;
int nr_cols;
+ char s[512];
+
+ struct perf_hpp hpp = {
+ .buf = s,
+ .size = sizeof(s),
+ .total_period = hists->stats.total_period,
+ };
nr_cols = 0;
- /* The percentage column */
- col_types[nr_cols++] = G_TYPE_STRING;
+ for (i = 0; i < PERF_HPP__MAX_INDEX; i++) {
+ if (!perf_hpp__format[i].cond)
+ continue;
+
+ col_types[nr_cols++] = G_TYPE_STRING;
+ }
list_for_each_entry(se, &hist_entry__sort_list, list) {
if (se->elide)
@@ -67,11 +129,17 @@ static void perf_gtk__show_hists(GtkWidget *window, struct hists *hists)
col_idx = 0;
- /* The percentage column */
- gtk_tree_view_insert_column_with_attributes(GTK_TREE_VIEW(view),
- -1, "Overhead (%)",
- renderer, "text",
- col_idx++, NULL);
+ for (i = 0; i < PERF_HPP__MAX_INDEX; i++) {
+ if (!perf_hpp__format[i].cond)
+ continue;
+
+ perf_hpp__format[i].header(&hpp);
+
+ gtk_tree_view_insert_column_with_attributes(GTK_TREE_VIEW(view),
+ -1, s,
+ renderer, "markup",
+ col_idx++, NULL);
+ }
list_for_each_entry(se, &hist_entry__sort_list, list) {
if (se->elide)
@@ -87,13 +155,9 @@ static void perf_gtk__show_hists(GtkWidget *window, struct hists *hists)
g_object_unref(GTK_TREE_MODEL(store));
- total_period = hists->stats.total_period;
-
for (nd = rb_first(&hists->entries); nd; nd = rb_next(nd)) {
struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);
GtkTreeIter iter;
- double percent;
- char s[512];
if (h->filtered)
continue;
@@ -102,11 +166,17 @@ static void perf_gtk__show_hists(GtkWidget *window, struct hists *hists)
col_idx = 0;
- percent = (h->period * 100.0) / total_period;
+ for (i = 0; i < PERF_HPP__MAX_INDEX; i++) {
+ if (!perf_hpp__format[i].cond)
+ continue;
- snprintf(s, ARRAY_SIZE(s), "%.2f", percent);
+ if (perf_hpp__format[i].color)
+ perf_hpp__format[i].color(&hpp, h);
+ else
+ perf_hpp__format[i].entry(&hpp, h);
- gtk_list_store_set(store, &iter, col_idx++, s, -1);
+ gtk_list_store_set(store, &iter, col_idx++, s, -1);
+ }
list_for_each_entry(se, &hist_entry__sort_list, list) {
if (se->elide)
@@ -166,9 +236,10 @@ static GtkWidget *perf_gtk__setup_statusbar(void)
}
int perf_evlist__gtk_browse_hists(struct perf_evlist *evlist,
- const char *help __used,
- void (*timer) (void *arg)__used,
- void *arg __used, int delay_secs __used)
+ const char *help,
+ void (*timer) (void *arg)__maybe_unused,
+ void *arg __maybe_unused,
+ int delay_secs __maybe_unused)
{
struct perf_evsel *pos;
GtkWidget *vbox;
@@ -233,6 +304,8 @@ int perf_evlist__gtk_browse_hists(struct perf_evlist *evlist,
gtk_window_set_position(GTK_WINDOW(window), GTK_WIN_POS_CENTER);
+ ui_helpline__push(help);
+
gtk_main();
perf_gtk__deactivate_context(&pgctx);
diff --git a/tools/perf/ui/gtk/gtk.h b/tools/perf/ui/gtk/gtk.h
index a4d0f2b4a2d..687af0bba18 100644
--- a/tools/perf/ui/gtk/gtk.h
+++ b/tools/perf/ui/gtk/gtk.h
@@ -29,6 +29,9 @@ static inline bool perf_gtk__is_active_context(struct perf_gtk_context *ctx)
struct perf_gtk_context *perf_gtk__activate_context(GtkWidget *window);
int perf_gtk__deactivate_context(struct perf_gtk_context **ctx);
+void perf_gtk__init_helpline(void);
+void perf_gtk__init_hpp(void);
+
#ifndef HAVE_GTK_INFO_BAR
static inline GtkWidget *perf_gtk__setup_info_bar(void)
{
diff --git a/tools/perf/ui/gtk/helpline.c b/tools/perf/ui/gtk/helpline.c
new file mode 100644
index 00000000000..5db4432ff12
--- /dev/null
+++ b/tools/perf/ui/gtk/helpline.c
@@ -0,0 +1,56 @@
+#include <stdio.h>
+#include <string.h>
+
+#include "gtk.h"
+#include "../ui.h"
+#include "../helpline.h"
+#include "../../util/debug.h"
+
+static void gtk_helpline_pop(void)
+{
+ if (!perf_gtk__is_active_context(pgctx))
+ return;
+
+ gtk_statusbar_pop(GTK_STATUSBAR(pgctx->statbar),
+ pgctx->statbar_ctx_id);
+}
+
+static void gtk_helpline_push(const char *msg)
+{
+ if (!perf_gtk__is_active_context(pgctx))
+ return;
+
+ gtk_statusbar_push(GTK_STATUSBAR(pgctx->statbar),
+ pgctx->statbar_ctx_id, msg);
+}
+
+static struct ui_helpline gtk_helpline_fns = {
+ .pop = gtk_helpline_pop,
+ .push = gtk_helpline_push,
+};
+
+void perf_gtk__init_helpline(void)
+{
+ helpline_fns = &gtk_helpline_fns;
+}
+
+int perf_gtk__show_helpline(const char *fmt, va_list ap)
+{
+ int ret;
+ char *ptr;
+ static int backlog;
+
+ ret = vscnprintf(ui_helpline__current + backlog,
+ sizeof(ui_helpline__current) - backlog, fmt, ap);
+ backlog += ret;
+
+ /* only the first line can be displayed */
+ ptr = strchr(ui_helpline__current, '\n');
+ if (ptr && (ptr - ui_helpline__current) <= backlog) {
+ *ptr = '\0';
+ ui_helpline__puts(ui_helpline__current);
+ backlog = 0;
+ }
+
+ return ret;
+}
diff --git a/tools/perf/ui/gtk/setup.c b/tools/perf/ui/gtk/setup.c
index 92879ce61e2..3c4c6ef7828 100644
--- a/tools/perf/ui/gtk/setup.c
+++ b/tools/perf/ui/gtk/setup.c
@@ -7,11 +7,15 @@ extern struct perf_error_ops perf_gtk_eops;
int perf_gtk__init(void)
{
perf_error__register(&perf_gtk_eops);
+ perf_gtk__init_helpline();
+ perf_gtk__init_hpp();
return gtk_init_check(NULL, NULL) ? 0 : -1;
}
-void perf_gtk__exit(bool wait_for_ok __used)
+void perf_gtk__exit(bool wait_for_ok __maybe_unused)
{
+ if (!perf_gtk__is_active_context(pgctx))
+ return;
perf_error__unregister(&perf_gtk_eops);
gtk_main_quit();
}
diff --git a/tools/perf/ui/gtk/util.c b/tools/perf/ui/gtk/util.c
index 0ead373c0df..8aada5b3c04 100644
--- a/tools/perf/ui/gtk/util.c
+++ b/tools/perf/ui/gtk/util.c
@@ -117,13 +117,8 @@ struct perf_error_ops perf_gtk_eops = {
* For now, just add stubs for NO_NEWT=1 build.
*/
#ifdef NO_NEWT_SUPPORT
-int ui_helpline__show_help(const char *format __used, va_list ap __used)
-{
- return 0;
-}
-
-void ui_progress__update(u64 curr __used, u64 total __used,
- const char *title __used)
+void ui_progress__update(u64 curr __maybe_unused, u64 total __maybe_unused,
+ const char *title __maybe_unused)
{
}
#endif
diff --git a/tools/perf/ui/helpline.c b/tools/perf/ui/helpline.c
index 2f950c2641c..a49bcf3c190 100644
--- a/tools/perf/ui/helpline.c
+++ b/tools/perf/ui/helpline.c
@@ -5,23 +5,32 @@
#include "../debug.h"
#include "helpline.h"
#include "ui.h"
-#include "libslang.h"
-void ui_helpline__pop(void)
+char ui_helpline__current[512];
+
+static void nop_helpline__pop(void)
{
}
-char ui_helpline__current[512];
+static void nop_helpline__push(const char *msg __maybe_unused)
+{
+}
-void ui_helpline__push(const char *msg)
+static struct ui_helpline default_helpline_fns = {
+ .pop = nop_helpline__pop,
+ .push = nop_helpline__push,
+};
+
+struct ui_helpline *helpline_fns = &default_helpline_fns;
+
+void ui_helpline__pop(void)
{
- const size_t sz = sizeof(ui_helpline__current);
+ helpline_fns->pop();
+}
- SLsmg_gotorc(SLtt_Screen_Rows - 1, 0);
- SLsmg_set_color(0);
- SLsmg_write_nstring((char *)msg, SLtt_Screen_Cols);
- SLsmg_refresh();
- strncpy(ui_helpline__current, msg, sz)[sz - 1] = '\0';
+void ui_helpline__push(const char *msg)
+{
+ helpline_fns->push(msg);
}
void ui_helpline__vpush(const char *fmt, va_list ap)
@@ -50,30 +59,3 @@ void ui_helpline__puts(const char *msg)
ui_helpline__pop();
ui_helpline__push(msg);
}
-
-void ui_helpline__init(void)
-{
- ui_helpline__puts(" ");
-}
-
-char ui_helpline__last_msg[1024];
-
-int ui_helpline__show_help(const char *format, va_list ap)
-{
- int ret;
- static int backlog;
-
- pthread_mutex_lock(&ui__lock);
- ret = vscnprintf(ui_helpline__last_msg + backlog,
- sizeof(ui_helpline__last_msg) - backlog, format, ap);
- backlog += ret;
-
- if (ui_helpline__last_msg[backlog - 1] == '\n') {
- ui_helpline__puts(ui_helpline__last_msg);
- SLsmg_refresh();
- backlog = 0;
- }
- pthread_mutex_unlock(&ui__lock);
-
- return ret;
-}
diff --git a/tools/perf/ui/helpline.h b/tools/perf/ui/helpline.h
index 7bab6b34e35..2b667ee454c 100644
--- a/tools/perf/ui/helpline.h
+++ b/tools/perf/ui/helpline.h
@@ -4,13 +4,44 @@
#include <stdio.h>
#include <stdarg.h>
+#include "../util/cache.h"
+
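+/*
+ * Per-frontend helpline operations: each UI backend (TUI, GTK)
+ * installs its own pop/push implementation at init time.
+ */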
+struct ui_helpline {
+ void (*pop)(void);
+ void (*push)(const char *msg);
+};
+
+extern struct ui_helpline *helpline_fns;
+
void ui_helpline__init(void);
+
void ui_helpline__pop(void);
void ui_helpline__push(const char *msg);
void ui_helpline__vpush(const char *fmt, va_list ap);
void ui_helpline__fpush(const char *fmt, ...);
void ui_helpline__puts(const char *msg);
-extern char ui_helpline__current[];
+extern char ui_helpline__current[512];
+
+#ifdef NO_NEWT_SUPPORT
+static inline int ui_helpline__show_help(const char *format __maybe_unused,
+ va_list ap __maybe_unused)
+{
+ return 0;
+}
+#else
+extern char ui_helpline__last_msg[];
+int ui_helpline__show_help(const char *format, va_list ap);
+#endif /* NO_NEWT_SUPPORT */
+
+#ifdef NO_GTK2_SUPPORT
+static inline int perf_gtk__show_helpline(const char *format __maybe_unused,
+ va_list ap __maybe_unused)
+{
+ return 0;
+}
+#else
+int perf_gtk__show_helpline(const char *format, va_list ap);
+#endif /* NO_GTK2_SUPPORT */
#endif /* _PERF_UI_HELPLINE_H_ */
diff --git a/tools/perf/ui/hist.c b/tools/perf/ui/hist.c
new file mode 100644
index 00000000000..e3f8cd46e7d
--- /dev/null
+++ b/tools/perf/ui/hist.c
@@ -0,0 +1,390 @@
+#include <math.h>
+
+#include "../util/hist.h"
+#include "../util/util.h"
+#include "../util/sort.h"
+
+
+/* hist period print (hpp) functions */
+static int hpp__header_overhead(struct perf_hpp *hpp)
+{
+ const char *fmt = hpp->ptr ? "Baseline" : "Overhead";
+
+ return scnprintf(hpp->buf, hpp->size, fmt);
+}
+
+static int hpp__width_overhead(struct perf_hpp *hpp __maybe_unused)
+{
+ return 8;
+}
+
+static int hpp__color_overhead(struct perf_hpp *hpp, struct hist_entry *he)
+{
+ double percent = 100.0 * he->period / hpp->total_period;
+
+ if (hpp->ptr) {
+ struct hists *old_hists = hpp->ptr;
+ u64 total_period = old_hists->stats.total_period;
+ u64 base_period = he->pair ? he->pair->period : 0;
+
+ if (total_period)
+ percent = 100.0 * base_period / total_period;
+ else
+ percent = 0.0;
+ }
+
+ return percent_color_snprintf(hpp->buf, hpp->size, " %6.2f%%", percent);
+}
+
+static int hpp__entry_overhead(struct perf_hpp *hpp, struct hist_entry *he)
+{
+ double percent = 100.0 * he->period / hpp->total_period;
+ const char *fmt = symbol_conf.field_sep ? "%.2f" : " %6.2f%%";
+
+ if (hpp->ptr) {
+ struct hists *old_hists = hpp->ptr;
+ u64 total_period = old_hists->stats.total_period;
+ u64 base_period = he->pair ? he->pair->period : 0;
+
+ if (total_period)
+ percent = 100.0 * base_period / total_period;
+ else
+ percent = 0.0;
+ }
+
+ return scnprintf(hpp->buf, hpp->size, fmt, percent);
+}
+
+static int hpp__header_overhead_sys(struct perf_hpp *hpp)
+{
+ const char *fmt = symbol_conf.field_sep ? "%s" : "%7s";
+
+ return scnprintf(hpp->buf, hpp->size, fmt, "sys");
+}
+
+static int hpp__width_overhead_sys(struct perf_hpp *hpp __maybe_unused)
+{
+ return 7;
+}
+
+static int hpp__color_overhead_sys(struct perf_hpp *hpp, struct hist_entry *he)
+{
+ double percent = 100.0 * he->period_sys / hpp->total_period;
+ return percent_color_snprintf(hpp->buf, hpp->size, "%6.2f%%", percent);
+}
+
+static int hpp__entry_overhead_sys(struct perf_hpp *hpp, struct hist_entry *he)
+{
+ double percent = 100.0 * he->period_sys / hpp->total_period;
+ const char *fmt = symbol_conf.field_sep ? "%.2f" : "%6.2f%%";
+
+ return scnprintf(hpp->buf, hpp->size, fmt, percent);
+}
+
+static int hpp__header_overhead_us(struct perf_hpp *hpp)
+{
+ const char *fmt = symbol_conf.field_sep ? "%s" : "%7s";
+
+ return scnprintf(hpp->buf, hpp->size, fmt, "user");
+}
+
+static int hpp__width_overhead_us(struct perf_hpp *hpp __maybe_unused)
+{
+ return 7;
+}
+
+static int hpp__color_overhead_us(struct perf_hpp *hpp, struct hist_entry *he)
+{
+ double percent = 100.0 * he->period_us / hpp->total_period;
+ return percent_color_snprintf(hpp->buf, hpp->size, "%6.2f%%", percent);
+}
+
+static int hpp__entry_overhead_us(struct perf_hpp *hpp, struct hist_entry *he)
+{
+ double percent = 100.0 * he->period_us / hpp->total_period;
+ const char *fmt = symbol_conf.field_sep ? "%.2f" : "%6.2f%%";
+
+ return scnprintf(hpp->buf, hpp->size, fmt, percent);
+}
+
+static int hpp__header_overhead_guest_sys(struct perf_hpp *hpp)
+{
+ return scnprintf(hpp->buf, hpp->size, "guest sys");
+}
+
+static int hpp__width_overhead_guest_sys(struct perf_hpp *hpp __maybe_unused)
+{
+ return 9;
+}
+
+static int hpp__color_overhead_guest_sys(struct perf_hpp *hpp,
+ struct hist_entry *he)
+{
+ double percent = 100.0 * he->period_guest_sys / hpp->total_period;
+ return percent_color_snprintf(hpp->buf, hpp->size, " %6.2f%% ", percent);
+}
+
+static int hpp__entry_overhead_guest_sys(struct perf_hpp *hpp,
+ struct hist_entry *he)
+{
+ double percent = 100.0 * he->period_guest_sys / hpp->total_period;
+ const char *fmt = symbol_conf.field_sep ? "%.2f" : " %6.2f%% ";
+
+ return scnprintf(hpp->buf, hpp->size, fmt, percent);
+}
+
+static int hpp__header_overhead_guest_us(struct perf_hpp *hpp)
+{
+ return scnprintf(hpp->buf, hpp->size, "guest usr");
+}
+
+static int hpp__width_overhead_guest_us(struct perf_hpp *hpp __maybe_unused)
+{
+ return 9;
+}
+
+static int hpp__color_overhead_guest_us(struct perf_hpp *hpp,
+ struct hist_entry *he)
+{
+ double percent = 100.0 * he->period_guest_us / hpp->total_period;
+ return percent_color_snprintf(hpp->buf, hpp->size, " %6.2f%% ", percent);
+}
+
+static int hpp__entry_overhead_guest_us(struct perf_hpp *hpp,
+ struct hist_entry *he)
+{
+ double percent = 100.0 * he->period_guest_us / hpp->total_period;
+ const char *fmt = symbol_conf.field_sep ? "%.2f" : " %6.2f%% ";
+
+ return scnprintf(hpp->buf, hpp->size, fmt, percent);
+}
+
+static int hpp__header_samples(struct perf_hpp *hpp)
+{
+ const char *fmt = symbol_conf.field_sep ? "%s" : "%11s";
+
+ return scnprintf(hpp->buf, hpp->size, fmt, "Samples");
+}
+
+static int hpp__width_samples(struct perf_hpp *hpp __maybe_unused)
+{
+ return 11;
+}
+
+static int hpp__entry_samples(struct perf_hpp *hpp, struct hist_entry *he)
+{
+ const char *fmt = symbol_conf.field_sep ? "%" PRIu64 : "%11" PRIu64;
+
+ return scnprintf(hpp->buf, hpp->size, fmt, he->nr_events);
+}
+
+static int hpp__header_period(struct perf_hpp *hpp)
+{
+ const char *fmt = symbol_conf.field_sep ? "%s" : "%12s";
+
+ return scnprintf(hpp->buf, hpp->size, fmt, "Period");
+}
+
+static int hpp__width_period(struct perf_hpp *hpp __maybe_unused)
+{
+ return 12;
+}
+
+static int hpp__entry_period(struct perf_hpp *hpp, struct hist_entry *he)
+{
+ const char *fmt = symbol_conf.field_sep ? "%" PRIu64 : "%12" PRIu64;
+
+ return scnprintf(hpp->buf, hpp->size, fmt, he->period);
+}
+
+static int hpp__header_delta(struct perf_hpp *hpp)
+{
+ const char *fmt = symbol_conf.field_sep ? "%s" : "%7s";
+
+ return scnprintf(hpp->buf, hpp->size, fmt, "Delta");
+}
+
+static int hpp__width_delta(struct perf_hpp *hpp __maybe_unused)
+{
+ return 7;
+}
+
+static int hpp__entry_delta(struct perf_hpp *hpp, struct hist_entry *he)
+{
+ struct hists *pair_hists = hpp->ptr;
+ u64 old_total, new_total;
+ double old_percent = 0, new_percent = 0;
+ double diff;
+ const char *fmt = symbol_conf.field_sep ? "%s" : "%7.7s";
+ char buf[32] = " ";
+
+ old_total = pair_hists->stats.total_period;
+ if (old_total > 0 && he->pair)
+ old_percent = 100.0 * he->pair->period / old_total;
+
+ new_total = hpp->total_period;
+ if (new_total > 0)
+ new_percent = 100.0 * he->period / new_total;
+
+ diff = new_percent - old_percent;
+ if (fabs(diff) >= 0.01)
+ scnprintf(buf, sizeof(buf), "%+4.2F%%", diff);
+
+ return scnprintf(hpp->buf, hpp->size, fmt, buf);
+}
+
+static int hpp__header_displ(struct perf_hpp *hpp)
+{
+ return scnprintf(hpp->buf, hpp->size, "Displ.");
+}
+
+static int hpp__width_displ(struct perf_hpp *hpp __maybe_unused)
+{
+ return 6;
+}
+
+static int hpp__entry_displ(struct perf_hpp *hpp,
+ struct hist_entry *he __maybe_unused)
+{
+ const char *fmt = symbol_conf.field_sep ? "%s" : "%6.6s";
+ char buf[32] = " ";
+
+ if (hpp->displacement)
+ scnprintf(buf, sizeof(buf), "%+4ld", hpp->displacement);
+
+ return scnprintf(hpp->buf, hpp->size, fmt, buf);
+}
+
+#define HPP__COLOR_PRINT_FNS(_name) \
+ .header = hpp__header_ ## _name, \
+ .width = hpp__width_ ## _name, \
+ .color = hpp__color_ ## _name, \
+ .entry = hpp__entry_ ## _name
+
+#define HPP__PRINT_FNS(_name) \
+ .header = hpp__header_ ## _name, \
+ .width = hpp__width_ ## _name, \
+ .entry = hpp__entry_ ## _name
+
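+/* NOTE: the entries below must stay in PERF_HPP__* index order. */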
+struct perf_hpp_fmt perf_hpp__format[] = {
+ { .cond = true, HPP__COLOR_PRINT_FNS(overhead) },
+ { .cond = false, HPP__COLOR_PRINT_FNS(overhead_sys) },
+ { .cond = false, HPP__COLOR_PRINT_FNS(overhead_us) },
+ { .cond = false, HPP__COLOR_PRINT_FNS(overhead_guest_sys) },
+ { .cond = false, HPP__COLOR_PRINT_FNS(overhead_guest_us) },
+ { .cond = false, HPP__PRINT_FNS(samples) },
+ { .cond = false, HPP__PRINT_FNS(period) },
+ { .cond = false, HPP__PRINT_FNS(delta) },
+ { .cond = false, HPP__PRINT_FNS(displ) }
+};
+
+#undef HPP__COLOR_PRINT_FNS
+#undef HPP__PRINT_FNS
+
+void perf_hpp__init(bool need_pair, bool show_displacement)
+{
+ if (symbol_conf.show_cpu_utilization) {
+ perf_hpp__format[PERF_HPP__OVERHEAD_SYS].cond = true;
+ perf_hpp__format[PERF_HPP__OVERHEAD_US].cond = true;
+
+ if (perf_guest) {
+ perf_hpp__format[PERF_HPP__OVERHEAD_GUEST_SYS].cond = true;
+ perf_hpp__format[PERF_HPP__OVERHEAD_GUEST_US].cond = true;
+ }
+ }
+
+ if (symbol_conf.show_nr_samples)
+ perf_hpp__format[PERF_HPP__SAMPLES].cond = true;
+
+ if (symbol_conf.show_total_period)
+ perf_hpp__format[PERF_HPP__PERIOD].cond = true;
+
+ if (need_pair) {
+ perf_hpp__format[PERF_HPP__DELTA].cond = true;
+
+ if (show_displacement)
+ perf_hpp__format[PERF_HPP__DISPL].cond = true;
+ }
+}
+
+static inline void advance_hpp(struct perf_hpp *hpp, int inc)
+{
+ hpp->buf += inc;
+ hpp->size -= inc;
+}
+
+int hist_entry__period_snprintf(struct perf_hpp *hpp, struct hist_entry *he,
+ bool color)
+{
+ const char *sep = symbol_conf.field_sep;
+ char *start = hpp->buf;
+ int i, ret;
+
+ if (symbol_conf.exclude_other && !he->parent)
+ return 0;
+
+ for (i = 0; i < PERF_HPP__MAX_INDEX; i++) {
+ if (!perf_hpp__format[i].cond)
+ continue;
+
+ if (!sep || i > 0) {
+ ret = scnprintf(hpp->buf, hpp->size, "%s", sep ?: " ");
+ advance_hpp(hpp, ret);
+ }
+
+ if (color && perf_hpp__format[i].color)
+ ret = perf_hpp__format[i].color(hpp, he);
+ else
+ ret = perf_hpp__format[i].entry(hpp, he);
+
+ advance_hpp(hpp, ret);
+ }
+
+ return hpp->buf - start;
+}
+
+int hist_entry__sort_snprintf(struct hist_entry *he, char *s, size_t size,
+ struct hists *hists)
+{
+ const char *sep = symbol_conf.field_sep;
+ struct sort_entry *se;
+ int ret = 0;
+
+ list_for_each_entry(se, &hist_entry__sort_list, list) {
+ if (se->elide)
+ continue;
+
+ ret += scnprintf(s + ret, size - ret, "%s", sep ?: " ");
+ ret += se->se_snprintf(he, s + ret, size - ret,
+ hists__col_len(hists, se->se_width_idx));
+ }
+
+ return ret;
+}
+
+/*
+ * See hists__fprintf to match the column widths
+ */
+unsigned int hists__sort_list_width(struct hists *hists)
+{
+ struct sort_entry *se;
+ int i, ret = 0;
+
+ for (i = 0; i < PERF_HPP__MAX_INDEX; i++) {
+ if (!perf_hpp__format[i].cond)
+ continue;
+ if (i)
+ ret += 2;
+
+ ret += perf_hpp__format[i].width(NULL);
+ }
+
+ list_for_each_entry(se, &hist_entry__sort_list, list)
+ if (!se->elide)
+ ret += 2 + hists__col_len(hists, se->se_width_idx);
+
+ if (verbose) /* Addr + origin */
+ ret += 3 + BITS_PER_LONG / 4;
+
+ return ret;
+}
diff --git a/tools/perf/ui/setup.c b/tools/perf/ui/setup.c
index 791fb15ce35..bd7d460f844 100644
--- a/tools/perf/ui/setup.c
+++ b/tools/perf/ui/setup.c
@@ -1,6 +1,10 @@
-#include "../cache.h"
-#include "../debug.h"
+#include <pthread.h>
+#include "../util/cache.h"
+#include "../util/debug.h"
+#include "../util/hist.h"
+
+pthread_mutex_t ui__lock = PTHREAD_MUTEX_INITIALIZER;
void setup_browser(bool fallback_to_pager)
{
@@ -25,6 +29,8 @@ void setup_browser(bool fallback_to_pager)
use_browser = 0;
if (fallback_to_pager)
setup_pager();
+
+ perf_hpp__init(false, false);
break;
}
}
diff --git a/tools/perf/ui/stdio/hist.c b/tools/perf/ui/stdio/hist.c
new file mode 100644
index 00000000000..882461a4283
--- /dev/null
+++ b/tools/perf/ui/stdio/hist.c
@@ -0,0 +1,498 @@
+#include <stdio.h>
+
+#include "../../util/util.h"
+#include "../../util/hist.h"
+#include "../../util/sort.h"
+
+
+static size_t callchain__fprintf_left_margin(FILE *fp, int left_margin)
+{
+ int i;
+ int ret = fprintf(fp, " ");
+
+ for (i = 0; i < left_margin; i++)
+ ret += fprintf(fp, " ");
+
+ return ret;
+}
+
+static size_t ipchain__fprintf_graph_line(FILE *fp, int depth, int depth_mask,
+ int left_margin)
+{
+ int i;
+ size_t ret = callchain__fprintf_left_margin(fp, left_margin);
+
+ for (i = 0; i < depth; i++)
+ if (depth_mask & (1 << i))
+ ret += fprintf(fp, "| ");
+ else
+ ret += fprintf(fp, " ");
+
+ ret += fprintf(fp, "\n");
+
+ return ret;
+}
+
+static size_t ipchain__fprintf_graph(FILE *fp, struct callchain_list *chain,
+ int depth, int depth_mask, int period,
+ u64 total_samples, u64 hits,
+ int left_margin)
+{
+ int i;
+ size_t ret = 0;
+
+ ret += callchain__fprintf_left_margin(fp, left_margin);
+ for (i = 0; i < depth; i++) {
+ if (depth_mask & (1 << i))
+ ret += fprintf(fp, "|");
+ else
+ ret += fprintf(fp, " ");
+ if (!period && i == depth - 1) {
+ double percent;
+
+ percent = hits * 100.0 / total_samples;
+ ret += percent_color_fprintf(fp, "--%2.2f%%-- ", percent);
+ } else
+ ret += fprintf(fp, "%s", " ");
+ }
+ if (chain->ms.sym)
+ ret += fprintf(fp, "%s\n", chain->ms.sym->name);
+ else
+ ret += fprintf(fp, "0x%0" PRIx64 "\n", chain->ip);
+
+ return ret;
+}
+
+static struct symbol *rem_sq_bracket;
+static struct callchain_list rem_hits;
+
+static void init_rem_hits(void)
+{
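+ /* the extra 6 bytes hold the "[...]" name and its terminating NUL */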
+ rem_sq_bracket = malloc(sizeof(*rem_sq_bracket) + 6);
+ if (!rem_sq_bracket) {
+ fprintf(stderr, "Not enough memory to display remaining hits\n");
+ return;
+ }
+
+ strcpy(rem_sq_bracket->name, "[...]");
+ rem_hits.ms.sym = rem_sq_bracket;
+}
+
+static size_t __callchain__fprintf_graph(FILE *fp, struct rb_root *root,
+ u64 total_samples, int depth,
+ int depth_mask, int left_margin)
+{
+ struct rb_node *node, *next;
+ struct callchain_node *child;
+ struct callchain_list *chain;
+ int new_depth_mask = depth_mask;
+ u64 remaining;
+ size_t ret = 0;
+ int i;
+ uint entries_printed = 0;
+
+ remaining = total_samples;
+
+ node = rb_first(root);
+ while (node) {
+ u64 new_total;
+ u64 cumul;
+
+ child = rb_entry(node, struct callchain_node, rb_node);
+ cumul = callchain_cumul_hits(child);
+ remaining -= cumul;
+
+ /*
+ * The depth mask manages the output of pipes that show
+ * the depth. We don't want to keep the pipes of the current
+ * level for the last child of this depth, except if we have
+ * remaining filtered hits, which will supersede the last
+ * child.
+ */
+ next = rb_next(node);
+ if (!next && (callchain_param.mode != CHAIN_GRAPH_REL || !remaining))
+ new_depth_mask &= ~(1 << (depth - 1));
+
+ /*
+ * But we keep the older depth mask for the line separator
+ * to keep the level link until we reach the last child
+ */
+ ret += ipchain__fprintf_graph_line(fp, depth, depth_mask,
+ left_margin);
+ i = 0;
+ list_for_each_entry(chain, &child->val, list) {
+ ret += ipchain__fprintf_graph(fp, chain, depth,
+ new_depth_mask, i++,
+ total_samples,
+ cumul,
+ left_margin);
+ }
+
+ if (callchain_param.mode == CHAIN_GRAPH_REL)
+ new_total = child->children_hit;
+ else
+ new_total = total_samples;
+
+ ret += __callchain__fprintf_graph(fp, &child->rb_root, new_total,
+ depth + 1,
+ new_depth_mask | (1 << depth),
+ left_margin);
+ node = next;
+ if (++entries_printed == callchain_param.print_limit)
+ break;
+ }
+
+ if (callchain_param.mode == CHAIN_GRAPH_REL &&
+ remaining && remaining != total_samples) {
+
+ if (!rem_sq_bracket)
+ return ret;
+
+ new_depth_mask &= ~(1 << (depth - 1));
+ ret += ipchain__fprintf_graph(fp, &rem_hits, depth,
+ new_depth_mask, 0, total_samples,
+ remaining, left_margin);
+ }
+
+ return ret;
+}
+
+static size_t callchain__fprintf_graph(FILE *fp, struct rb_root *root,
+ u64 total_samples, int left_margin)
+{
+ struct callchain_node *cnode;
+ struct callchain_list *chain;
+ u32 entries_printed = 0;
+ bool printed = false;
+ struct rb_node *node;
+ int i = 0;
+ int ret = 0;
+
+ /*
+ * If we have a single callchain root, don't bother printing
+ * its percentage (100% in fractal mode, and the same percentage
+ * as the hist in graph mode). This also avoids one level of
+ * column.
+ */
+ node = rb_first(root);
+ if (node && !rb_next(node)) {
+ cnode = rb_entry(node, struct callchain_node, rb_node);
+ list_for_each_entry(chain, &cnode->val, list) {
+ /*
+ * If we sort by symbol, the first entry is the same as the
+ * symbol. No need to print it, otherwise it appears
+ * displayed twice.
+ */
+ if (!i++ && sort__first_dimension == SORT_SYM)
+ continue;
+ if (!printed) {
+ ret += callchain__fprintf_left_margin(fp, left_margin);
+ ret += fprintf(fp, "|\n");
+ ret += callchain__fprintf_left_margin(fp, left_margin);
+ ret += fprintf(fp, "---");
+ left_margin += 3;
+ printed = true;
+ } else
+ ret += callchain__fprintf_left_margin(fp, left_margin);
+
+ if (chain->ms.sym)
+ ret += fprintf(fp, " %s\n", chain->ms.sym->name);
+ else
+ ret += fprintf(fp, " %p\n", (void *)(long)chain->ip);
+
+ if (++entries_printed == callchain_param.print_limit)
+ break;
+ }
+ root = &cnode->rb_root;
+ }
+
+ ret += __callchain__fprintf_graph(fp, root, total_samples,
+ 1, 1, left_margin);
+ ret += fprintf(fp, "\n");
+
+ return ret;
+}
+
+static size_t __callchain__fprintf_flat(FILE *fp,
+ struct callchain_node *self,
+ u64 total_samples)
+{
+ struct callchain_list *chain;
+ size_t ret = 0;
+
+ if (!self)
+ return 0;
+
+ ret += __callchain__fprintf_flat(fp, self->parent, total_samples);
+
+
+ list_for_each_entry(chain, &self->val, list) {
+ if (chain->ip >= PERF_CONTEXT_MAX)
+ continue;
+ if (chain->ms.sym)
+ ret += fprintf(fp, " %s\n", chain->ms.sym->name);
+ else
+ ret += fprintf(fp, " %p\n",
+ (void *)(long)chain->ip);
+ }
+
+ return ret;
+}
+
+static size_t callchain__fprintf_flat(FILE *fp, struct rb_root *self,
+ u64 total_samples)
+{
+ size_t ret = 0;
+ u32 entries_printed = 0;
+ struct rb_node *rb_node;
+ struct callchain_node *chain;
+
+ rb_node = rb_first(self);
+ while (rb_node) {
+ double percent;
+
+ chain = rb_entry(rb_node, struct callchain_node, rb_node);
+ percent = chain->hit * 100.0 / to