/*
 * Dynamic function tracer architecture backend.
 *
 * Copyright IBM Corp. 2009
 *
 * Author(s): Heiko Carstens <heiko.carstens@de.ibm.com>,
 *            Martin Schwidefsky <schwidefsky@de.ibm.com>
 */

#include <linux/hardirq.h>
#include <linux/uaccess.h>
#include <linux/ftrace.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/kallsyms.h>
#include <linux/slab.h>
#include <trace/syscall.h>
#include <asm/lowcore.h>

#ifdef CONFIG_DYNAMIC_FTRACE

void ftrace_disable_code(void);
void ftrace_disable_return(void);
void ftrace_call_code(void);
void ftrace_nop_code(void);

#define FTRACE_INSN_SIZE 4
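
/*
 * Sketch of the patching scheme, as inferred from the code below: every
 * traced function starts with the 6-byte "stg %r14,8(%r15)" followed by
 * the code that loads the current tracer address from lowcore and calls
 * it. Only the first FTRACE_INSN_SIZE (4) bytes are ever repatched:
 * either the leading four bytes of the stg (tracing enabled, execution
 * falls through into the basr) or a relative jump over the whole
 * MCOUNT_INSN_SIZE block (tracing disabled). The ".word 0x0024" in the
 * 64-bit ftrace_disable_code keeps the trailing two bytes of the stg in
 * place, so re-enabling only has to restore the leading four bytes; the
 * "bcr 0,%r7" instructions in the 31-bit variant are 2-byte nops that
 * pad the block to the same overall size.
 */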

#ifdef CONFIG_64BIT

asm(
" .align 4\n"
"ftrace_disable_code:\n"
" j 0f\n"
" .word 0x0024\n"
" lg %r1,"__stringify(__LC_FTRACE_FUNC)"\n"
" basr %r14,%r1\n"
"ftrace_disable_return:\n"
" lg %r14,8(%r15)\n"
" lgr %r0,%r0\n"
"0:\n");

asm(
" .align 4\n"
"ftrace_nop_code:\n"
" j .+"__stringify(MCOUNT_INSN_SIZE)"\n");

asm(
" .align 4\n"
"ftrace_call_code:\n"
" stg %r14,8(%r15)\n");

#else /* CONFIG_64BIT */

asm(
" .align 4\n"
"ftrace_disable_code:\n"
" j 0f\n"
" l %r1,"__stringify(__LC_FTRACE_FUNC)"\n"
" basr %r14,%r1\n"
"ftrace_disable_return:\n"
" l %r14,4(%r15)\n"
" j 0f\n"
" bcr 0,%r7\n"
" bcr 0,%r7\n"
" bcr 0,%r7\n"
" bcr 0,%r7\n"
" bcr 0,%r7\n"
" bcr 0,%r7\n"
"0:\n");

asm(
" .align 4\n"
"ftrace_nop_code:\n"
" j .+"__stringify(MCOUNT_INSN_SIZE)"\n");

asm(
" .align 4\n"
"ftrace_call_code:\n"
" st %r14,4(%r15)\n");

#endif /* CONFIG_64BIT */

static int ftrace_modify_code(unsigned long ip,
                              void *old_code, int old_size,
                              void *new_code, int new_size)
{
        unsigned char replaced[MCOUNT_INSN_SIZE];

        /*
         * Note: Due to modules, code can disappear and change.
         * We need to protect against faulting as well as code
         * changing, which we do by using the probe_kernel_*
         * functions. This however is just a simple sanity check.
         */
        if (probe_kernel_read(replaced, (void *)ip, old_size))
                return -EFAULT;
        if (memcmp(replaced, old_code, old_size) != 0)
                return -EINVAL;
        if (probe_kernel_write((void *)ip, new_code, new_size))
                return -EPERM;
        return 0;
}
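
/*
 * At boot every call site still contains the full mcount block emitted
 * by the compiler, beginning with the stg/st from ftrace_call_code, so
 * the initial nop rewrites the whole MCOUNT_INSN_SIZE block with
 * ftrace_disable_code; all later transitions (ftrace_make_nop and
 * ftrace_make_call below) only flip the first FTRACE_INSN_SIZE bytes.
 */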
static int ftrace_make_initial_nop(struct module *mod, struct dyn_ftrace *rec,
                                   unsigned long addr)
{
        return ftrace_modify_code(rec->ip,
                                  ftrace_call_code, FTRACE_INSN_SIZE,
                                  ftrace_disable_code, MCOUNT_INSN_SIZE);
}

int ftrace_make_nop(struct module *mod, struct dyn_ftrace *rec,
                    unsigned long addr)
{
        if (addr == MCOUNT_ADDR)
                return ftrace_make_initial_nop(mod, rec, addr);
        return ftrace_modify_code(rec->ip,
                                  ftrace_call_code, FTRACE_INSN_SIZE,
                                  ftrace_nop_code, FTRACE_INSN_SIZE);
}

int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
{
        return ftrace_modify_code(rec->ip,
                                  ftrace_nop_code, FTRACE_INSN_SIZE,
                                  ftrace_call_code, FTRACE_INSN_SIZE);
}
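
/*
 * No code is patched here: the active tracer is switched by updating
 * ftrace_dyn_func, which the ftrace trampoline (presumably the mcount
 * code in mcount.S) reads on every hit.
 */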
int ftrace_update_ftrace_func(ftrace_func_t func)
{
        ftrace_dyn_func = (unsigned long)func;
        return 0;
}
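
/*
 * Core ftrace hands in a pointer through which the architecture reports
 * the result of its initialization; writing zero here signals success.
 */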
int __init ftrace_dyn_arch_init(void *data)
{
        *(unsigned long *)data = 0;
        return 0;
}

#endif /* CONFIG_DYNAMIC_FTRACE */

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
#ifdef CONFIG_DYNAMIC_FTRACE
/*
 * Patch the kernel code at the ftrace_graph_caller location:
 * the instruction there is a branch relative on condition. The condition
 * mask is either all ones (branch always, i.e. ftrace_graph_caller
 * disabled) or all zeroes (nop, i.e. ftrace_graph_caller enabled).
 * The instruction format for brc is a7m4xxxx, where m is the condition mask.
 */
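
/*
 * Worked encoding example: brc is RI format, opcode 0xa7x4 with the
 * condition mask in bits 8-11. 0xa7f4 is "brc 15,..." (mask 15, branch
 * always), while 0xa704 is "brc 0,..." (mask 0, never taken, i.e. a
 * 4-byte nop). Only this first halfword is rewritten below; the 16-bit
 * branch offset behind it stays untouched.
 */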
int ftrace_enable_ftrace_graph_caller(void)
{
        unsigned short opcode = 0xa704;

        return probe_kernel_write(ftrace_graph_caller, &opcode, sizeof(opcode));
}

int ftrace_disable_ftrace_graph_caller(void)
{
        unsigned short opcode = 0xa7f4;

        return probe_kernel_write(ftrace_graph_caller, &opcode, sizeof(opcode));
}
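
/*
 * The ip passed to prepare_ftrace_return() apparently points just behind
 * the basr of the mcount block in the traced function. Subtracting the
 * offset of ftrace_disable_return within ftrace_disable_code (or
 * MCOUNT_OFFSET_RET in the non-dynamic case) maps it back to the start of
 * the block, i.e. to the real call location.
 */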
static inline unsigned long ftrace_mcount_call_adjust(unsigned long addr)
{
        return addr - (ftrace_disable_return - ftrace_disable_code);
}

#else /* CONFIG_DYNAMIC_FTRACE */

static inline unsigned long ftrace_mcount_call_adjust(unsigned long addr)
{
        return addr - MCOUNT_OFFSET_RET;
}

#endif /* CONFIG_DYNAMIC_FTRACE */

/*
 * Hook the return address and push it on the stack of return addresses
 * in the current thread info.
 */
unsigned long prepare_ftrace_return(unsigned long ip, unsigned long parent)
{
        struct ftrace_graph_ent trace;

        if (unlikely(atomic_read(&current->tracing_graph_pause)))
                goto out;
        if (ftrace_push_return_trace(parent, ip, &trace.depth, 0) == -EBUSY)
                goto out;
        trace.func = ftrace_mcount_call_adjust(ip) & PSW_ADDR_INSN;
        /* Only trace if the calling function expects to. */
        if (!ftrace_graph_entry(&trace)) {
                current->curr_ret_stack--;
                goto out;
        }
        parent = (unsigned long)return_to_handler;
out:
        return parent;
}
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */

#ifdef CONFIG_FTRACE_SYSCALLS

extern unsigned long __start_syscalls_metadata[];
extern unsigned long __stop_syscalls_metadata[];
extern unsigned int sys_call_table[];

static struct syscall_metadata **syscalls_metadata;

struct syscall_metadata *syscall_nr_to_meta(int nr)
{
        if (!syscalls_metadata || nr >= NR_syscalls || nr < 0)
                return NULL;

        return syscalls_metadata[nr];
}

int syscall_name_to_nr(char *name)
{
        int i;

        if (!syscalls_metadata)
                return -1;
        for (i = 0; i < NR_syscalls; i++)
                if (syscalls_metadata[i])
                        if (!strcmp(syscalls_metadata[i]->name, name))
                                return i;
        return -1;
}

void set_syscall_enter_id(int num, int id)
{
        syscalls_metadata[num]->enter_id = id;
}

void set_syscall_exit_id(int num, int id)
{
        syscalls_metadata[num]->exit_id = id;
}
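
/*
 * The names are compared with their first three characters skipped: the
 * metadata entries are named "sys_...", while kallsyms may resolve a
 * sys_call_table slot to a "SyS_..." alias created by the syscall
 * wrapper macros, so only the part after the prefix is significant.
 */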
static struct syscall_metadata *find_syscall_meta(unsigned long syscall)
{
        struct syscall_metadata *start;
        struct syscall_metadata *stop;
        char str[KSYM_SYMBOL_LEN];

        start = (struct syscall_metadata *)__start_syscalls_metadata;
        stop = (struct syscall_metadata *)__stop_syscalls_metadata;
        kallsyms_lookup(syscall, NULL, NULL, NULL, str);

        for ( ; start < stop; start++) {
                if (start->name && !strcmp(start->name + 3, str + 3))
                        return start;
        }
        return NULL;
}

static int __init arch_init_ftrace_syscalls(void)
{
        struct syscall_metadata *meta;
        int i;

        syscalls_metadata = kzalloc(sizeof(*syscalls_metadata) * NR_syscalls,
                                    GFP_KERNEL);
        if (!syscalls_metadata)
                return -ENOMEM;
        for (i = 0; i < NR_syscalls; i++) {
                meta = find_syscall_meta((unsigned long)sys_call_table[i]);
                syscalls_metadata[i] = meta;
        }
        return 0;
}
arch_initcall(arch_init_ftrace_syscalls);

#endif /* CONFIG_FTRACE_SYSCALLS */