From af085d9084b48530153f51e6cad19fd0b1a13ed7 Mon Sep 17 00:00:00 2001 From: Josh Poimboeuf Date: Mon, 13 Feb 2017 19:42:28 -0600 Subject: [PATCH 01/19] stacktrace/x86: add function for detecting reliable stack traces For live patching and possibly other use cases, a stack trace is only useful if it can be assured that it's completely reliable. Add a new save_stack_trace_tsk_reliable() function to achieve that. Note that if the target task isn't the current task, and the target task is allowed to run, then it could be writing the stack while the unwinder is reading it, resulting in possible corruption. So the caller of save_stack_trace_tsk_reliable() must ensure that the task is either 'current' or inactive. save_stack_trace_tsk_reliable() relies on the x86 unwinder's detection of pt_regs on the stack. If the pt_regs are not user-mode registers from a syscall, then they indicate an in-kernel interrupt or exception (e.g. preemption or a page fault), in which case the stack is considered unreliable due to the nature of frame pointers. It also relies on the x86 unwinder's detection of other issues, such as: - corrupted stack data - stack grows the wrong way - stack walk doesn't reach the bottom - user didn't provide a large enough entries array Such issues are reported by checking unwind_error() and !unwind_done(). Also add CONFIG_HAVE_RELIABLE_STACKTRACE so arch-independent code can determine at build time whether the function is implemented. Signed-off-by: Josh Poimboeuf Reviewed-by: Miroslav Benes Acked-by: Ingo Molnar # for the x86 changes Signed-off-by: Jiri Kosina --- arch/Kconfig | 6 +++ arch/x86/Kconfig | 1 + arch/x86/include/asm/unwind.h | 6 +++ arch/x86/kernel/stacktrace.c | 96 +++++++++++++++++++++++++++++++++- arch/x86/kernel/unwind_frame.c | 2 + include/linux/stacktrace.h | 9 ++-- kernel/stacktrace.c | 12 ++++- 7 files changed, 126 insertions(+), 6 deletions(-) diff --git a/arch/Kconfig b/arch/Kconfig index cd211a14a88f..6ad00ad73459 100644 --- a/arch/Kconfig +++ b/arch/Kconfig @@ -713,6 +713,12 @@ config HAVE_STACK_VALIDATION Architecture supports the 'objtool check' host tool command, which performs compile-time stack metadata validation. +config HAVE_RELIABLE_STACKTRACE + bool + help + Architecture has a save_stack_trace_tsk_reliable() function which + only returns a stack trace if it can guarantee the trace is reliable. 
+ config HAVE_ARCH_HASH bool default n diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig index cc98d5a294ee..2a26852c11b6 100644 --- a/arch/x86/Kconfig +++ b/arch/x86/Kconfig @@ -160,6 +160,7 @@ config X86 select HAVE_PERF_REGS select HAVE_PERF_USER_STACK_DUMP select HAVE_REGS_AND_STACK_ACCESS_API + select HAVE_RELIABLE_STACKTRACE if X86_64 && FRAME_POINTER && STACK_VALIDATION select HAVE_STACK_VALIDATION if X86_64 select HAVE_SYSCALL_TRACEPOINTS select HAVE_UNSTABLE_SCHED_CLOCK diff --git a/arch/x86/include/asm/unwind.h b/arch/x86/include/asm/unwind.h index 6fa75b17aec3..137e9cce2ab4 100644 --- a/arch/x86/include/asm/unwind.h +++ b/arch/x86/include/asm/unwind.h @@ -11,6 +11,7 @@ struct unwind_state { unsigned long stack_mask; struct task_struct *task; int graph_idx; + bool error; #ifdef CONFIG_FRAME_POINTER unsigned long *bp, *orig_sp; struct pt_regs *regs; @@ -40,6 +41,11 @@ void unwind_start(struct unwind_state *state, struct task_struct *task, __unwind_start(state, task, regs, first_frame); } +static inline bool unwind_error(struct unwind_state *state) +{ + return state->error; +} + #ifdef CONFIG_FRAME_POINTER static inline diff --git a/arch/x86/kernel/stacktrace.c b/arch/x86/kernel/stacktrace.c index 8e2b79b88e51..8dabd7bf1673 100644 --- a/arch/x86/kernel/stacktrace.c +++ b/arch/x86/kernel/stacktrace.c @@ -76,6 +76,101 @@ void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace) } EXPORT_SYMBOL_GPL(save_stack_trace_tsk); +#ifdef CONFIG_HAVE_RELIABLE_STACKTRACE + +#define STACKTRACE_DUMP_ONCE(task) ({ \ + static bool __section(.data.unlikely) __dumped; \ + \ + if (!__dumped) { \ + __dumped = true; \ + WARN_ON(1); \ + show_stack(task, NULL); \ + } \ +}) + +static int __save_stack_trace_reliable(struct stack_trace *trace, + struct task_struct *task) +{ + struct unwind_state state; + struct pt_regs *regs; + unsigned long addr; + + for (unwind_start(&state, task, NULL, NULL); !unwind_done(&state); + unwind_next_frame(&state)) { + + regs = unwind_get_entry_regs(&state); + if (regs) { + /* + * Kernel mode registers on the stack indicate an + * in-kernel interrupt or exception (e.g., preemption + * or a page fault), which can make frame pointers + * unreliable. + */ + if (!user_mode(regs)) + return -EINVAL; + + /* + * The last frame contains the user mode syscall + * pt_regs. Skip it and finish the unwind. + */ + unwind_next_frame(&state); + if (!unwind_done(&state)) { + STACKTRACE_DUMP_ONCE(task); + return -EINVAL; + } + break; + } + + addr = unwind_get_return_address(&state); + + /* + * A NULL or invalid return address probably means there's some + * generated code which __kernel_text_address() doesn't know + * about. + */ + if (!addr) { + STACKTRACE_DUMP_ONCE(task); + return -EINVAL; + } + + if (save_stack_address(trace, addr, false)) + return -EINVAL; + } + + /* Check for stack corruption */ + if (unwind_error(&state)) { + STACKTRACE_DUMP_ONCE(task); + return -EINVAL; + } + + if (trace->nr_entries < trace->max_entries) + trace->entries[trace->nr_entries++] = ULONG_MAX; + + return 0; +} + +/* + * This function returns an error if it detects any unreliable features of the + * stack. Otherwise it guarantees that the stack trace is reliable. + * + * If the task is not 'current', the caller *must* ensure the task is inactive. 
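+ *
+ * Usage sketch (illustrative; MAX_ENTRIES and the fallback policy are
+ * assumptions of the caller, not part of this patch):
+ *
+ *	unsigned long entries[MAX_ENTRIES];
+ *	struct stack_trace trace = {
+ *		.entries	= entries,
+ *		.max_entries	= ARRAY_SIZE(entries),
+ *	};
+ *
+ *	ret = save_stack_trace_tsk_reliable(task, &trace);
+ *	if (ret)
+ *		...the trace is not reliable; fall back or retry later...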
+ */ +int save_stack_trace_tsk_reliable(struct task_struct *tsk, + struct stack_trace *trace) +{ + int ret; + + if (!try_get_task_stack(tsk)) + return -EINVAL; + + ret = __save_stack_trace_reliable(trace, tsk); + + put_task_stack(tsk); + + return ret; +} +#endif /* CONFIG_HAVE_RELIABLE_STACKTRACE */ + /* Userspace stacktrace - based on kernel/trace/trace_sysprof.c */ struct stack_frame_user { @@ -138,4 +233,3 @@ void save_stack_trace_user(struct stack_trace *trace) if (trace->nr_entries < trace->max_entries) trace->entries[trace->nr_entries++] = ULONG_MAX; } - diff --git a/arch/x86/kernel/unwind_frame.c b/arch/x86/kernel/unwind_frame.c index 478d15dbaee4..5ed43910e04b 100644 --- a/arch/x86/kernel/unwind_frame.c +++ b/arch/x86/kernel/unwind_frame.c @@ -225,6 +225,8 @@ bool unwind_next_frame(struct unwind_state *state) return true; bad_address: + state->error = true; + /* * When unwinding a non-current task, the task might actually be * running on another CPU, in which case it could be modifying its diff --git a/include/linux/stacktrace.h b/include/linux/stacktrace.h index 0a34489a46b6..4205f71a5f0e 100644 --- a/include/linux/stacktrace.h +++ b/include/linux/stacktrace.h @@ -18,6 +18,8 @@ extern void save_stack_trace_regs(struct pt_regs *regs, struct stack_trace *trace); extern void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace); +extern int save_stack_trace_tsk_reliable(struct task_struct *tsk, + struct stack_trace *trace); extern void print_stack_trace(struct stack_trace *trace, int spaces); extern int snprint_stack_trace(char *buf, size_t size, @@ -29,12 +31,13 @@ extern void save_stack_trace_user(struct stack_trace *trace); # define save_stack_trace_user(trace) do { } while (0) #endif -#else +#else /* !CONFIG_STACKTRACE */ # define save_stack_trace(trace) do { } while (0) # define save_stack_trace_tsk(tsk, trace) do { } while (0) # define save_stack_trace_user(trace) do { } while (0) # define print_stack_trace(trace, spaces) do { } while (0) # define snprint_stack_trace(buf, size, trace, spaces) do { } while (0) -#endif +# define save_stack_trace_tsk_reliable(tsk, trace) ({ -ENOSYS; }) +#endif /* CONFIG_STACKTRACE */ -#endif +#endif /* __LINUX_STACKTRACE_H */ diff --git a/kernel/stacktrace.c b/kernel/stacktrace.c index 9c15a9124e83..f8edee9c792d 100644 --- a/kernel/stacktrace.c +++ b/kernel/stacktrace.c @@ -54,8 +54,8 @@ int snprint_stack_trace(char *buf, size_t size, EXPORT_SYMBOL_GPL(snprint_stack_trace); /* - * Architectures that do not implement save_stack_trace_tsk or - * save_stack_trace_regs get this weak alias and a once-per-bootup warning + * Architectures that do not implement save_stack_trace_*() + * get these weak aliases and once-per-bootup warnings * (whenever this facility is utilized - for example by procfs): */ __weak void @@ -69,3 +69,11 @@ save_stack_trace_regs(struct pt_regs *regs, struct stack_trace *trace) { WARN_ONCE(1, KERN_INFO "save_stack_trace_regs() not implemented yet.\n"); } + +__weak int +save_stack_trace_tsk_reliable(struct task_struct *tsk, + struct stack_trace *trace) +{ + WARN_ONCE(1, KERN_INFO "save_stack_tsk_reliable() not implemented yet.\n"); + return -ENOSYS; +} From 3a404842547c92e71127870a613319a29cdebe49 Mon Sep 17 00:00:00 2001 From: Josh Poimboeuf Date: Mon, 13 Feb 2017 19:42:29 -0600 Subject: [PATCH 02/19] x86/entry: define _TIF_ALLWORK_MASK flags explicitly The _TIF_ALLWORK_MASK macro automatically includes the least-significant 16 bits of the thread_info flags, which is less than obvious and tends to create 
confusion and surprises when reading or modifying the code. Define the flags explicitly. Signed-off-by: Josh Poimboeuf Reviewed-by: Petr Mladek Reviewed-by: Miroslav Benes Reviewed-by: Kamalesh Babulal Acked-by: Ingo Molnar # for the x86 changes Signed-off-by: Jiri Kosina --- arch/x86/include/asm/thread_info.h | 11 +++++------ 1 file changed, 5 insertions(+), 6 deletions(-) diff --git a/arch/x86/include/asm/thread_info.h b/arch/x86/include/asm/thread_info.h index ad6f5eb07a95..207d0d981287 100644 --- a/arch/x86/include/asm/thread_info.h +++ b/arch/x86/include/asm/thread_info.h @@ -73,9 +73,6 @@ struct thread_info { * thread information flags * - these are process state flags that various assembly files * may need to access - * - pending work-to-be-done flags are in LSW - * - other flags in MSW - * Warning: layout of LSW is hardcoded in entry.S */ #define TIF_SYSCALL_TRACE 0 /* syscall trace active */ #define TIF_NOTIFY_RESUME 1 /* callback before returning to user */ @@ -103,8 +100,8 @@ struct thread_info { #define _TIF_SYSCALL_TRACE (1 << TIF_SYSCALL_TRACE) #define _TIF_NOTIFY_RESUME (1 << TIF_NOTIFY_RESUME) #define _TIF_SIGPENDING (1 << TIF_SIGPENDING) -#define _TIF_SINGLESTEP (1 << TIF_SINGLESTEP) #define _TIF_NEED_RESCHED (1 << TIF_NEED_RESCHED) +#define _TIF_SINGLESTEP (1 << TIF_SINGLESTEP) #define _TIF_SYSCALL_EMU (1 << TIF_SYSCALL_EMU) #define _TIF_SYSCALL_AUDIT (1 << TIF_SYSCALL_AUDIT) #define _TIF_SECCOMP (1 << TIF_SECCOMP) @@ -133,8 +130,10 @@ struct thread_info { /* work to do on any return to user space */ #define _TIF_ALLWORK_MASK \ - ((0x0000FFFF & ~_TIF_SECCOMP) | _TIF_SYSCALL_TRACEPOINT | \ - _TIF_NOHZ) + (_TIF_SYSCALL_TRACE | _TIF_NOTIFY_RESUME | _TIF_SIGPENDING | \ + _TIF_NEED_RESCHED | _TIF_SINGLESTEP | _TIF_SYSCALL_EMU | \ + _TIF_SYSCALL_AUDIT | _TIF_USER_RETURN_NOTIFY | _TIF_UPROBE | \ + _TIF_NOHZ | _TIF_SYSCALL_TRACEPOINT) /* flags to check in __switch_to() */ #define _TIF_WORK_CTXSW \ From 46c5a0113f843be5c55b1c40dd486538891156d4 Mon Sep 17 00:00:00 2001 From: Josh Poimboeuf Date: Mon, 13 Feb 2017 19:42:30 -0600 Subject: [PATCH 03/19] livepatch: create temporary klp_update_patch_state() stub Create temporary stubs for klp_update_patch_state() so we can add TIF_PATCH_PENDING to different architectures in separate patches without breaking build bisectability. 
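To illustrate the pattern this enables, here is a minimal sketch of the
call site that the later arch patches add (hedged: _TIF_PATCH_PENDING and
the surrounding function come from those later patches and are shown here
only for context):

	#include <linux/sched.h>
	#include <linux/livepatch.h>

	/*
	 * Sketch of an arch exit-to-user work loop.  Both this
	 * temporary stub and the !CONFIG_LIVEPATCH static inline are
	 * no-ops, so the call site can be merged first without
	 * breaking any intermediate build.
	 */
	static void example_exit_work(u32 cached_flags)
	{
		if (cached_flags & _TIF_PATCH_PENDING)
			klp_update_patch_state(current);
	}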
Signed-off-by: Josh Poimboeuf Reviewed-by: Petr Mladek Signed-off-by: Jiri Kosina --- include/linux/livepatch.h | 5 ++++- kernel/livepatch/core.c | 3 +++ 2 files changed, 7 insertions(+), 1 deletion(-) diff --git a/include/linux/livepatch.h b/include/linux/livepatch.h index 9072f04db616..5cc20e588a22 100644 --- a/include/linux/livepatch.h +++ b/include/linux/livepatch.h @@ -123,10 +123,13 @@ void arch_klp_init_object_loaded(struct klp_patch *patch, int klp_module_coming(struct module *mod); void klp_module_going(struct module *mod); +void klp_update_patch_state(struct task_struct *task); + #else /* !CONFIG_LIVEPATCH */ static inline int klp_module_coming(struct module *mod) { return 0; } -static inline void klp_module_going(struct module *mod) { } +static inline void klp_module_going(struct module *mod) {} +static inline void klp_update_patch_state(struct task_struct *task) {} #endif /* CONFIG_LIVEPATCH */ diff --git a/kernel/livepatch/core.c b/kernel/livepatch/core.c index af4643873e71..217b39d71176 100644 --- a/kernel/livepatch/core.c +++ b/kernel/livepatch/core.c @@ -64,6 +64,9 @@ static LIST_HEAD(klp_ops); static struct kobject *klp_root_kobj; +/* TODO: temporary stub */ +void klp_update_patch_state(struct task_struct *task) {} + static struct klp_ops *klp_find_ops(unsigned long old_addr) { struct klp_ops *ops; From afb94c9e0b413bbdea779192eaca076c43ede031 Mon Sep 17 00:00:00 2001 From: Josh Poimboeuf Date: Mon, 13 Feb 2017 19:42:31 -0600 Subject: [PATCH 04/19] livepatch/x86: add TIF_PATCH_PENDING thread flag Add the TIF_PATCH_PENDING thread flag to enable the new livepatch per-task consistency model for x86_64. The bit getting set indicates the thread has a pending patch which needs to be applied when the thread exits the kernel. The bit is placed in the _TIF_ALLWORK_MASK macro, which results in exit_to_usermode_loop() calling klp_update_patch_state() when it's set. Signed-off-by: Josh Poimboeuf Acked-by: Andy Lutomirski Reviewed-by: Petr Mladek Reviewed-by: Miroslav Benes Reviewed-by: Kamalesh Babulal Acked-by: Ingo Molnar # for the x86 changes Signed-off-by: Jiri Kosina --- arch/x86/entry/common.c | 9 ++++++--- arch/x86/include/asm/thread_info.h | 4 +++- 2 files changed, 9 insertions(+), 4 deletions(-) diff --git a/arch/x86/entry/common.c b/arch/x86/entry/common.c index 370c42c7f046..cdefcfdd9e63 100644 --- a/arch/x86/entry/common.c +++ b/arch/x86/entry/common.c @@ -22,6 +22,7 @@ #include #include #include +#include #include #include @@ -130,14 +131,13 @@ static long syscall_trace_enter(struct pt_regs *regs) #define EXIT_TO_USERMODE_LOOP_FLAGS \ (_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | _TIF_UPROBE | \ - _TIF_NEED_RESCHED | _TIF_USER_RETURN_NOTIFY) + _TIF_NEED_RESCHED | _TIF_USER_RETURN_NOTIFY | _TIF_PATCH_PENDING) static void exit_to_usermode_loop(struct pt_regs *regs, u32 cached_flags) { /* * In order to return to user mode, we need to have IRQs off with - * none of _TIF_SIGPENDING, _TIF_NOTIFY_RESUME, _TIF_USER_RETURN_NOTIFY, - * _TIF_UPROBE, or _TIF_NEED_RESCHED set. Several of these flags + * none of EXIT_TO_USERMODE_LOOP_FLAGS set. Several of these flags * can be set at any time on preemptable kernels if we have IRQs on, * so we need to loop. Disabling preemption wouldn't help: doing the * work to clear some of the flags can sleep. 
@@ -164,6 +164,9 @@ static void exit_to_usermode_loop(struct pt_regs *regs, u32 cached_flags) if (cached_flags & _TIF_USER_RETURN_NOTIFY) fire_user_return_notifiers(); + if (cached_flags & _TIF_PATCH_PENDING) + klp_update_patch_state(current); + /* Disable IRQs and retry */ local_irq_disable(); diff --git a/arch/x86/include/asm/thread_info.h b/arch/x86/include/asm/thread_info.h index 207d0d981287..83372dc43943 100644 --- a/arch/x86/include/asm/thread_info.h +++ b/arch/x86/include/asm/thread_info.h @@ -84,6 +84,7 @@ struct thread_info { #define TIF_SECCOMP 8 /* secure computing */ #define TIF_USER_RETURN_NOTIFY 11 /* notify kernel of userspace return */ #define TIF_UPROBE 12 /* breakpointed or singlestepping */ +#define TIF_PATCH_PENDING 13 /* pending live patching update */ #define TIF_NOTSC 16 /* TSC is not accessible in userland */ #define TIF_IA32 17 /* IA32 compatibility process */ #define TIF_NOHZ 19 /* in adaptive nohz mode */ @@ -107,6 +108,7 @@ struct thread_info { #define _TIF_SECCOMP (1 << TIF_SECCOMP) #define _TIF_USER_RETURN_NOTIFY (1 << TIF_USER_RETURN_NOTIFY) #define _TIF_UPROBE (1 << TIF_UPROBE) +#define _TIF_PATCH_PENDING (1 << TIF_PATCH_PENDING) #define _TIF_NOTSC (1 << TIF_NOTSC) #define _TIF_IA32 (1 << TIF_IA32) #define _TIF_NOHZ (1 << TIF_NOHZ) @@ -133,7 +135,7 @@ struct thread_info { (_TIF_SYSCALL_TRACE | _TIF_NOTIFY_RESUME | _TIF_SIGPENDING | \ _TIF_NEED_RESCHED | _TIF_SINGLESTEP | _TIF_SYSCALL_EMU | \ _TIF_SYSCALL_AUDIT | _TIF_USER_RETURN_NOTIFY | _TIF_UPROBE | \ - _TIF_NOHZ | _TIF_SYSCALL_TRACEPOINT) + _TIF_PATCH_PENDING | _TIF_NOHZ | _TIF_SYSCALL_TRACEPOINT) /* flags to check in __switch_to() */ #define _TIF_WORK_CTXSW \ From a768f78429fd9b0b07f5795dd62f31a519fc1bae Mon Sep 17 00:00:00 2001 From: Josh Poimboeuf Date: Mon, 13 Feb 2017 19:42:32 -0600 Subject: [PATCH 05/19] livepatch/powerpc: add TIF_PATCH_PENDING thread flag Add the TIF_PATCH_PENDING thread flag to enable the new livepatch per-task consistency model for powerpc. The bit getting set indicates the thread has a pending patch which needs to be applied when the thread exits the kernel. The bit is included in the _TIF_USER_WORK_MASK macro so that do_notify_resume() and klp_update_patch_state() get called when the bit is set. 
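For context, a simplified sketch of how the transition code (added later
in this series) might set the bit; the real setter also synchronizes with
the rest of the transition machinery:

	#include <linux/sched.h>

	/*
	 * Mark every task; each one then switches itself via
	 * klp_update_patch_state() on its next exit from the kernel.
	 */
	static void klp_mark_tasks_pending(void)
	{
		struct task_struct *g, *task;

		read_lock(&tasklist_lock);
		for_each_process_thread(g, task)
			set_tsk_thread_flag(task, TIF_PATCH_PENDING);
		read_unlock(&tasklist_lock);
	}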
Signed-off-by: Josh Poimboeuf Reviewed-by: Petr Mladek Reviewed-by: Miroslav Benes Reviewed-by: Kamalesh Babulal Reviewed-by: Balbir Singh Acked-by: Michael Ellerman (powerpc) Signed-off-by: Jiri Kosina --- arch/powerpc/include/asm/thread_info.h | 4 +++- arch/powerpc/kernel/signal.c | 4 ++++ 2 files changed, 7 insertions(+), 1 deletion(-) diff --git a/arch/powerpc/include/asm/thread_info.h b/arch/powerpc/include/asm/thread_info.h index 87e4b2d8dcd4..6fc6464f7421 100644 --- a/arch/powerpc/include/asm/thread_info.h +++ b/arch/powerpc/include/asm/thread_info.h @@ -92,6 +92,7 @@ static inline struct thread_info *current_thread_info(void) TIF_NEED_RESCHED */ #define TIF_32BIT 4 /* 32 bit binary */ #define TIF_RESTORE_TM 5 /* need to restore TM FP/VEC/VSX */ +#define TIF_PATCH_PENDING 6 /* pending live patching update */ #define TIF_SYSCALL_AUDIT 7 /* syscall auditing active */ #define TIF_SINGLESTEP 8 /* singlestepping active */ #define TIF_NOHZ 9 /* in adaptive nohz mode */ @@ -115,6 +116,7 @@ static inline struct thread_info *current_thread_info(void) #define _TIF_POLLING_NRFLAG (1< #include #include +#include #include #include #include @@ -162,6 +163,9 @@ void do_notify_resume(struct pt_regs *regs, unsigned long thread_info_flags) tracehook_notify_resume(regs); } + if (thread_info_flags & _TIF_PATCH_PENDING) + klp_update_patch_state(current); + user_enter(); } From 30d64f1904d44a7e52a93d711d57aabbac2efd72 Mon Sep 17 00:00:00 2001 From: Jiri Slaby Date: Mon, 13 Feb 2017 19:42:33 -0600 Subject: [PATCH 06/19] livepatch/s390: reorganize TIF thread flag bits Group the TIF thread flag bits by their inclusion in the _TIF_WORK and _TIF_TRACE macros. Signed-off-by: Jiri Slaby Signed-off-by: Josh Poimboeuf Reviewed-by: Miroslav Benes Acked-by: Heiko Carstens Signed-off-by: Jiri Kosina --- arch/s390/include/asm/thread_info.h | 22 ++++++++++++++-------- 1 file changed, 14 insertions(+), 8 deletions(-) diff --git a/arch/s390/include/asm/thread_info.h b/arch/s390/include/asm/thread_info.h index a5b54a445eb8..4977668788d7 100644 --- a/arch/s390/include/asm/thread_info.h +++ b/arch/s390/include/asm/thread_info.h @@ -51,14 +51,12 @@ int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src); /* * thread information flags bit numbers */ +/* _TIF_WORK bits */ #define TIF_NOTIFY_RESUME 0 /* callback before returning to user */ #define TIF_SIGPENDING 1 /* signal pending */ #define TIF_NEED_RESCHED 2 /* rescheduling necessary */ -#define TIF_SYSCALL_TRACE 3 /* syscall trace active */ -#define TIF_SYSCALL_AUDIT 4 /* syscall auditing active */ -#define TIF_SECCOMP 5 /* secure computing */ -#define TIF_SYSCALL_TRACEPOINT 6 /* syscall tracepoint instrumentation */ -#define TIF_UPROBE 7 /* breakpointed or single-stepping */ +#define TIF_UPROBE 3 /* breakpointed or single-stepping */ + #define TIF_31BIT 16 /* 32bit process */ #define TIF_MEMDIE 17 /* is terminating due to OOM killer */ #define TIF_RESTORE_SIGMASK 18 /* restore signal mask in do_signal() */ @@ -66,15 +64,23 @@ int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src); #define TIF_BLOCK_STEP 20 /* This task is block stepped */ #define TIF_UPROBE_SINGLESTEP 21 /* This task is uprobe single stepped */ +/* _TIF_TRACE bits */ +#define TIF_SYSCALL_TRACE 24 /* syscall trace active */ +#define TIF_SYSCALL_AUDIT 25 /* syscall auditing active */ +#define TIF_SECCOMP 26 /* secure computing */ +#define TIF_SYSCALL_TRACEPOINT 27 /* syscall tracepoint instrumentation */ + #define _TIF_NOTIFY_RESUME _BITUL(TIF_NOTIFY_RESUME) #define 
_TIF_SIGPENDING _BITUL(TIF_SIGPENDING) #define _TIF_NEED_RESCHED _BITUL(TIF_NEED_RESCHED) +#define _TIF_UPROBE _BITUL(TIF_UPROBE) + +#define _TIF_31BIT _BITUL(TIF_31BIT) +#define _TIF_SINGLE_STEP _BITUL(TIF_SINGLE_STEP) + #define _TIF_SYSCALL_TRACE _BITUL(TIF_SYSCALL_TRACE) #define _TIF_SYSCALL_AUDIT _BITUL(TIF_SYSCALL_AUDIT) #define _TIF_SECCOMP _BITUL(TIF_SECCOMP) #define _TIF_SYSCALL_TRACEPOINT _BITUL(TIF_SYSCALL_TRACEPOINT) -#define _TIF_UPROBE _BITUL(TIF_UPROBE) -#define _TIF_31BIT _BITUL(TIF_31BIT) -#define _TIF_SINGLE_STEP _BITUL(TIF_SINGLE_STEP) #endif /* _ASM_THREAD_INFO_H */ From 2f09ca60a56dd9c217d32d68340e1b08cbbe1ace Mon Sep 17 00:00:00 2001 From: Miroslav Benes Date: Mon, 13 Feb 2017 19:42:34 -0600 Subject: [PATCH 07/19] livepatch/s390: add TIF_PATCH_PENDING thread flag Update a task's patch state when returning from a system call or user space interrupt, or after handling a signal. This greatly increases the chances of a patch operation succeeding. If a task is I/O bound, it can be patched when returning from a system call. If a task is CPU bound, it can be patched when returning from an interrupt. If a task is sleeping on a to-be-patched function, the user can send SIGSTOP and SIGCONT to force it to switch. Since there are two ways the syscall can be restarted on return from a signal handling process, it is important to clear the flag before do_signal() is called. Otherwise we could miss the migration if we used SIGSTOP/SIGCONT procedure or fake signal to migrate patching blocking tasks. If we place our hook to sysc_work label in entry before TIF_SIGPENDING is evaluated we kill two birds with one stone. The task is correctly migrated in all return paths from a syscall. Signed-off-by: Miroslav Benes Signed-off-by: Josh Poimboeuf Acked-by: Heiko Carstens Signed-off-by: Jiri Kosina --- arch/s390/include/asm/thread_info.h | 2 ++ arch/s390/kernel/entry.S | 31 ++++++++++++++++++++++++++++- 2 files changed, 32 insertions(+), 1 deletion(-) diff --git a/arch/s390/include/asm/thread_info.h b/arch/s390/include/asm/thread_info.h index 4977668788d7..646845edf148 100644 --- a/arch/s390/include/asm/thread_info.h +++ b/arch/s390/include/asm/thread_info.h @@ -56,6 +56,7 @@ int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src); #define TIF_SIGPENDING 1 /* signal pending */ #define TIF_NEED_RESCHED 2 /* rescheduling necessary */ #define TIF_UPROBE 3 /* breakpointed or single-stepping */ +#define TIF_PATCH_PENDING 4 /* pending live patching update */ #define TIF_31BIT 16 /* 32bit process */ #define TIF_MEMDIE 17 /* is terminating due to OOM killer */ @@ -74,6 +75,7 @@ int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src); #define _TIF_SIGPENDING _BITUL(TIF_SIGPENDING) #define _TIF_NEED_RESCHED _BITUL(TIF_NEED_RESCHED) #define _TIF_UPROBE _BITUL(TIF_UPROBE) +#define _TIF_PATCH_PENDING _BITUL(TIF_PATCH_PENDING) #define _TIF_31BIT _BITUL(TIF_31BIT) #define _TIF_SINGLE_STEP _BITUL(TIF_SINGLE_STEP) diff --git a/arch/s390/kernel/entry.S b/arch/s390/kernel/entry.S index dff2152350a7..a08b5eea5567 100644 --- a/arch/s390/kernel/entry.S +++ b/arch/s390/kernel/entry.S @@ -47,7 +47,7 @@ STACK_SIZE = 1 << STACK_SHIFT STACK_INIT = STACK_SIZE - STACK_FRAME_OVERHEAD - __PT_SIZE _TIF_WORK = (_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | _TIF_NEED_RESCHED | \ - _TIF_UPROBE) + _TIF_UPROBE | _TIF_PATCH_PENDING) _TIF_TRACE = (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | _TIF_SECCOMP | \ _TIF_SYSCALL_TRACEPOINT) _CIF_WORK = (_CIF_MCCK_PENDING | _CIF_ASCE_PRIMARY | \ @@ -334,6 +334,11 
@@ ENTRY(system_call) #endif TSTMSK __PT_FLAGS(%r11),_PIF_PER_TRAP jo .Lsysc_singlestep +#ifdef CONFIG_LIVEPATCH + TSTMSK __TI_flags(%r12),_TIF_PATCH_PENDING + jo .Lsysc_patch_pending # handle live patching just before + # signals and possible syscall restart +#endif TSTMSK __TI_flags(%r12),_TIF_SIGPENDING jo .Lsysc_sigpending TSTMSK __TI_flags(%r12),_TIF_NOTIFY_RESUME @@ -408,6 +413,16 @@ ENTRY(system_call) jg uprobe_notify_resume #endif +# +# _TIF_PATCH_PENDING is set, call klp_update_patch_state +# +#ifdef CONFIG_LIVEPATCH +.Lsysc_patch_pending: + lg %r2,__LC_CURRENT # pass pointer to task struct + larl %r14,.Lsysc_return + jg klp_update_patch_state +#endif + # # _PIF_PER_TRAP is set, call do_per_trap # @@ -659,6 +674,10 @@ ENTRY(io_int_handler) jo .Lio_mcck_pending TSTMSK __TI_flags(%r12),_TIF_NEED_RESCHED jo .Lio_reschedule +#ifdef CONFIG_LIVEPATCH + TSTMSK __TI_flags(%r12),_TIF_PATCH_PENDING + jo .Lio_patch_pending +#endif TSTMSK __TI_flags(%r12),_TIF_SIGPENDING jo .Lio_sigpending TSTMSK __TI_flags(%r12),_TIF_NOTIFY_RESUME @@ -707,6 +726,16 @@ ENTRY(io_int_handler) TRACE_IRQS_OFF j .Lio_return +# +# _TIF_PATCH_PENDING is set, call klp_update_patch_state +# +#ifdef CONFIG_LIVEPATCH +.Lio_patch_pending: + lg %r2,__LC_CURRENT # pass pointer to task struct + larl %r14,.Lio_return + jg klp_update_patch_state +#endif + # # _TIF_SIGPENDING or is set, call do_signal # From 0dade9f374f1c15f9b43ab01ab75a3b459bba5f6 Mon Sep 17 00:00:00 2001 From: Josh Poimboeuf Date: Mon, 13 Feb 2017 19:42:35 -0600 Subject: [PATCH 08/19] livepatch: separate enabled and patched states Once we have a consistency model, patches and their objects will be enabled and disabled at different times. For example, when a patch is disabled, its loaded objects' funcs can remain registered with ftrace indefinitely until the unpatching operation is complete and they're no longer in use. It's less confusing if we give them different names: patches can be enabled or disabled; objects (and their funcs) can be patched or unpatched: - Enabled means that a patch is logically enabled (but not necessarily fully applied). - Patched means that an object's funcs are registered with ftrace and added to the klp_ops func stack. Also, since these states are binary, represent them with booleans instead of ints. 
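The split lets the two flags disagree while an operation is in flight.
Roughly (an illustrative summary of the states described above):

	/*
	 *  patch->enabled | obj->patched | meaning
	 *  ---------------+--------------+--------------------------------
	 *  true           | true         | object loaded, funcs registered
	 *                 |              | with ftrace
	 *  true           | false        | patch enabled, object's module
	 *                 |              | not loaded yet
	 *  false          | true         | patch disabled, funcs still in
	 *                 |              | use until unpatching completes
	 *  false          | false        | fully unpatched
	 */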
Signed-off-by: Josh Poimboeuf Acked-by: Miroslav Benes Reviewed-by: Petr Mladek Reviewed-by: Kamalesh Babulal Signed-off-by: Jiri Kosina --- include/linux/livepatch.h | 17 ++++----- kernel/livepatch/core.c | 72 +++++++++++++++++++-------------------- 2 files changed, 42 insertions(+), 47 deletions(-) diff --git a/include/linux/livepatch.h b/include/linux/livepatch.h index 5cc20e588a22..9787a63b57ac 100644 --- a/include/linux/livepatch.h +++ b/include/linux/livepatch.h @@ -28,11 +28,6 @@ #include -enum klp_state { - KLP_DISABLED, - KLP_ENABLED -}; - /** * struct klp_func - function structure for live patching * @old_name: name of the function to be patched @@ -41,8 +36,8 @@ enum klp_state { * can be found (optional) * @old_addr: the address of the function being patched * @kobj: kobject for sysfs resources - * @state: tracks function-level patch application state * @stack_node: list node for klp_ops func_stack list + * @patched: the func has been added to the klp_ops list */ struct klp_func { /* external */ @@ -60,8 +55,8 @@ struct klp_func { /* internal */ unsigned long old_addr; struct kobject kobj; - enum klp_state state; struct list_head stack_node; + bool patched; }; /** @@ -71,7 +66,7 @@ struct klp_func { * @kobj: kobject for sysfs resources * @mod: kernel module associated with the patched object * (NULL for vmlinux) - * @state: tracks object-level patch application state + * @patched: the object's funcs have been added to the klp_ops list */ struct klp_object { /* external */ @@ -81,7 +76,7 @@ struct klp_object { /* internal */ struct kobject kobj; struct module *mod; - enum klp_state state; + bool patched; }; /** @@ -90,7 +85,7 @@ struct klp_object { * @objs: object entries for kernel objects to be patched * @list: list node for global list of registered patches * @kobj: kobject for sysfs resources - * @state: tracks patch-level application state + * @enabled: the patch is enabled (but operation may be incomplete) */ struct klp_patch { /* external */ @@ -100,7 +95,7 @@ struct klp_patch { /* internal */ struct list_head list; struct kobject kobj; - enum klp_state state; + bool enabled; }; #define klp_for_each_object(patch, obj) \ diff --git a/kernel/livepatch/core.c b/kernel/livepatch/core.c index 217b39d71176..2dbd355cee07 100644 --- a/kernel/livepatch/core.c +++ b/kernel/livepatch/core.c @@ -348,11 +348,11 @@ static unsigned long klp_get_ftrace_location(unsigned long faddr) } #endif -static void klp_disable_func(struct klp_func *func) +static void klp_unpatch_func(struct klp_func *func) { struct klp_ops *ops; - if (WARN_ON(func->state != KLP_ENABLED)) + if (WARN_ON(!func->patched)) return; if (WARN_ON(!func->old_addr)) return; @@ -378,10 +378,10 @@ static void klp_disable_func(struct klp_func *func) list_del_rcu(&func->stack_node); } - func->state = KLP_DISABLED; + func->patched = false; } -static int klp_enable_func(struct klp_func *func) +static int klp_patch_func(struct klp_func *func) { struct klp_ops *ops; int ret; @@ -389,7 +389,7 @@ static int klp_enable_func(struct klp_func *func) if (WARN_ON(!func->old_addr)) return -EINVAL; - if (WARN_ON(func->state != KLP_DISABLED)) + if (WARN_ON(func->patched)) return -EINVAL; ops = klp_find_ops(func->old_addr); @@ -437,7 +437,7 @@ static int klp_enable_func(struct klp_func *func) list_add_rcu(&func->stack_node, &ops->func_stack); } - func->state = KLP_ENABLED; + func->patched = true; return 0; @@ -448,36 +448,36 @@ err: return ret; } -static void klp_disable_object(struct klp_object *obj) +static void klp_unpatch_object(struct 
klp_object *obj) { struct klp_func *func; klp_for_each_func(obj, func) - if (func->state == KLP_ENABLED) - klp_disable_func(func); + if (func->patched) + klp_unpatch_func(func); - obj->state = KLP_DISABLED; + obj->patched = false; } -static int klp_enable_object(struct klp_object *obj) +static int klp_patch_object(struct klp_object *obj) { struct klp_func *func; int ret; - if (WARN_ON(obj->state != KLP_DISABLED)) + if (WARN_ON(obj->patched)) return -EINVAL; if (WARN_ON(!klp_is_object_loaded(obj))) return -EINVAL; klp_for_each_func(obj, func) { - ret = klp_enable_func(func); + ret = klp_patch_func(func); if (ret) { - klp_disable_object(obj); + klp_unpatch_object(obj); return ret; } } - obj->state = KLP_ENABLED; + obj->patched = true; return 0; } @@ -488,17 +488,17 @@ static int __klp_disable_patch(struct klp_patch *patch) /* enforce stacking: only the last enabled patch can be disabled */ if (!list_is_last(&patch->list, &klp_patches) && - list_next_entry(patch, list)->state == KLP_ENABLED) + list_next_entry(patch, list)->enabled) return -EBUSY; pr_notice("disabling patch '%s'\n", patch->mod->name); klp_for_each_object(patch, obj) { - if (obj->state == KLP_ENABLED) - klp_disable_object(obj); + if (obj->patched) + klp_unpatch_object(obj); } - patch->state = KLP_DISABLED; + patch->enabled = false; return 0; } @@ -522,7 +522,7 @@ int klp_disable_patch(struct klp_patch *patch) goto err; } - if (patch->state == KLP_DISABLED) { + if (!patch->enabled) { ret = -EINVAL; goto err; } @@ -540,12 +540,12 @@ static int __klp_enable_patch(struct klp_patch *patch) struct klp_object *obj; int ret; - if (WARN_ON(patch->state != KLP_DISABLED)) + if (WARN_ON(patch->enabled)) return -EINVAL; /* enforce stacking: only the first disabled patch can be enabled */ if (patch->list.prev != &klp_patches && - list_prev_entry(patch, list)->state == KLP_DISABLED) + !list_prev_entry(patch, list)->enabled) return -EBUSY; pr_notice("enabling patch '%s'\n", patch->mod->name); @@ -554,12 +554,12 @@ static int __klp_enable_patch(struct klp_patch *patch) if (!klp_is_object_loaded(obj)) continue; - ret = klp_enable_object(obj); + ret = klp_patch_object(obj); if (ret) goto unregister; } - patch->state = KLP_ENABLED; + patch->enabled = true; return 0; @@ -617,20 +617,20 @@ static ssize_t enabled_store(struct kobject *kobj, struct kobj_attribute *attr, if (ret) return -EINVAL; - if (val != KLP_DISABLED && val != KLP_ENABLED) + if (val > 1) return -EINVAL; patch = container_of(kobj, struct klp_patch, kobj); mutex_lock(&klp_mutex); - if (val == patch->state) { + if (patch->enabled == val) { /* already in requested state */ ret = -EINVAL; goto err; } - if (val == KLP_ENABLED) { + if (val) { ret = __klp_enable_patch(patch); if (ret) goto err; @@ -655,7 +655,7 @@ static ssize_t enabled_show(struct kobject *kobj, struct klp_patch *patch; patch = container_of(kobj, struct klp_patch, kobj); - return snprintf(buf, PAGE_SIZE-1, "%d\n", patch->state); + return snprintf(buf, PAGE_SIZE-1, "%d\n", patch->enabled); } static struct kobj_attribute enabled_kobj_attr = __ATTR_RW(enabled); @@ -749,7 +749,7 @@ static int klp_init_func(struct klp_object *obj, struct klp_func *func) return -EINVAL; INIT_LIST_HEAD(&func->stack_node); - func->state = KLP_DISABLED; + func->patched = false; /* The format for the sysfs directory is where sympos * is the nth occurrence of this symbol in kallsyms for the patched @@ -804,7 +804,7 @@ static int klp_init_object(struct klp_patch *patch, struct klp_object *obj) if (!obj->funcs) return -EINVAL; - obj->state = 
KLP_DISABLED; + obj->patched = false; obj->mod = NULL; klp_find_object_module(obj); @@ -845,7 +845,7 @@ static int klp_init_patch(struct klp_patch *patch) mutex_lock(&klp_mutex); - patch->state = KLP_DISABLED; + patch->enabled = false; ret = kobject_init_and_add(&patch->kobj, &klp_ktype_patch, klp_root_kobj, "%s", patch->mod->name); @@ -891,7 +891,7 @@ int klp_unregister_patch(struct klp_patch *patch) goto out; } - if (patch->state == KLP_ENABLED) { + if (patch->enabled) { ret = -EBUSY; goto out; } @@ -978,13 +978,13 @@ int klp_module_coming(struct module *mod) goto err; } - if (patch->state == KLP_DISABLED) + if (!patch->enabled) break; pr_notice("applying patch '%s' to loading module '%s'\n", patch->mod->name, obj->mod->name); - ret = klp_enable_object(obj); + ret = klp_patch_object(obj); if (ret) { pr_warn("failed to apply patch '%s' to module '%s' (%d)\n", patch->mod->name, obj->mod->name, ret); @@ -1035,10 +1035,10 @@ void klp_module_going(struct module *mod) if (!klp_is_module(obj) || strcmp(obj->name, mod->name)) continue; - if (patch->state != KLP_DISABLED) { + if (patch->enabled) { pr_notice("reverting patch '%s' on unloading module '%s'\n", patch->mod->name, obj->mod->name); - klp_disable_object(obj); + klp_unpatch_object(obj); } klp_free_object_loaded(obj); From aa82dc3e00da63751bb9dfab26983037b79fc39d Mon Sep 17 00:00:00 2001 From: Josh Poimboeuf Date: Mon, 13 Feb 2017 19:42:36 -0600 Subject: [PATCH 09/19] livepatch: remove unnecessary object loaded check klp_patch_object()'s callers already ensure that the object is loaded, so its call to klp_is_object_loaded() is unnecessary. This will also make it possible to move the patching code into a separate file. Signed-off-by: Josh Poimboeuf Acked-by: Miroslav Benes Reviewed-by: Petr Mladek Reviewed-by: Kamalesh Babulal Signed-off-by: Jiri Kosina --- kernel/livepatch/core.c | 3 --- 1 file changed, 3 deletions(-) diff --git a/kernel/livepatch/core.c b/kernel/livepatch/core.c index 2dbd355cee07..47ed643a6362 100644 --- a/kernel/livepatch/core.c +++ b/kernel/livepatch/core.c @@ -467,9 +467,6 @@ static int klp_patch_object(struct klp_object *obj) if (WARN_ON(obj->patched)) return -EINVAL; - if (WARN_ON(!klp_is_object_loaded(obj))) - return -EINVAL; - klp_for_each_func(obj, func) { ret = klp_patch_func(func); if (ret) { From c349cdcaba589fb49cf105093ebc695eb8b9ff08 Mon Sep 17 00:00:00 2001 From: Josh Poimboeuf Date: Mon, 13 Feb 2017 19:42:37 -0600 Subject: [PATCH 10/19] livepatch: move patching functions into patch.c Move functions related to the actual patching of functions and objects into a new patch.c file. 
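The moved code centers on the shared klp_ops / func_stack design.  A
sketch of how stacked patches to the same function resolve (illustrative,
following the klp_ftrace_handler() logic in the diff below):

	/*
	 * Two patches to the same old_addr share one klp_ops:
	 *
	 *	ops->func_stack:  func_v2 -> func_v1
	 *
	 * klp_ftrace_handler() always redirects to the list head, so
	 * enabling patch 2 on top of patch 1 takes effect instantly via
	 * list_add_rcu(), and disabling it falls back to func_v1
	 * without re-registering anything with ftrace.
	 */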
Signed-off-by: Josh Poimboeuf Acked-by: Miroslav Benes Reviewed-by: Petr Mladek Reviewed-by: Kamalesh Babulal Signed-off-by: Jiri Kosina --- kernel/livepatch/Makefile | 2 +- kernel/livepatch/core.c | 202 +----------------------------------- kernel/livepatch/patch.c | 213 ++++++++++++++++++++++++++++++++++++++ kernel/livepatch/patch.h | 32 ++++++ 4 files changed, 247 insertions(+), 202 deletions(-) create mode 100644 kernel/livepatch/patch.c create mode 100644 kernel/livepatch/patch.h diff --git a/kernel/livepatch/Makefile b/kernel/livepatch/Makefile index e8780c0901d9..e136dad8ff7e 100644 --- a/kernel/livepatch/Makefile +++ b/kernel/livepatch/Makefile @@ -1,3 +1,3 @@ obj-$(CONFIG_LIVEPATCH) += livepatch.o -livepatch-objs := core.o +livepatch-objs := core.o patch.o diff --git a/kernel/livepatch/core.c b/kernel/livepatch/core.c index 47ed643a6362..6a137e1f4490 100644 --- a/kernel/livepatch/core.c +++ b/kernel/livepatch/core.c @@ -24,32 +24,13 @@ #include #include #include -#include #include #include #include #include #include #include - -/** - * struct klp_ops - structure for tracking registered ftrace ops structs - * - * A single ftrace_ops is shared between all enabled replacement functions - * (klp_func structs) which have the same old_addr. This allows the switch - * between function versions to happen instantaneously by updating the klp_ops - * struct's func_stack list. The winner is the klp_func at the top of the - * func_stack (front of the list). - * - * @node: node for the global klp_ops list - * @func_stack: list head for the stack of klp_func's (active func is on top) - * @fops: registered ftrace ops struct - */ -struct klp_ops { - struct list_head node; - struct list_head func_stack; - struct ftrace_ops fops; -}; +#include "patch.h" /* * The klp_mutex protects the global lists and state transitions of any @@ -60,28 +41,12 @@ struct klp_ops { static DEFINE_MUTEX(klp_mutex); static LIST_HEAD(klp_patches); -static LIST_HEAD(klp_ops); static struct kobject *klp_root_kobj; /* TODO: temporary stub */ void klp_update_patch_state(struct task_struct *task) {} -static struct klp_ops *klp_find_ops(unsigned long old_addr) -{ - struct klp_ops *ops; - struct klp_func *func; - - list_for_each_entry(ops, &klp_ops, node) { - func = list_first_entry(&ops->func_stack, struct klp_func, - stack_node); - if (func->old_addr == old_addr) - return ops; - } - - return NULL; -} - static bool klp_is_module(struct klp_object *obj) { return obj->name; @@ -314,171 +279,6 @@ static int klp_write_object_relocations(struct module *pmod, return ret; } -static void notrace klp_ftrace_handler(unsigned long ip, - unsigned long parent_ip, - struct ftrace_ops *fops, - struct pt_regs *regs) -{ - struct klp_ops *ops; - struct klp_func *func; - - ops = container_of(fops, struct klp_ops, fops); - - rcu_read_lock(); - func = list_first_or_null_rcu(&ops->func_stack, struct klp_func, - stack_node); - if (WARN_ON_ONCE(!func)) - goto unlock; - - klp_arch_set_pc(regs, (unsigned long)func->new_func); -unlock: - rcu_read_unlock(); -} - -/* - * Convert a function address into the appropriate ftrace location. - * - * Usually this is just the address of the function, but on some architectures - * it's more complicated so allow them to provide a custom behaviour. 
- */ -#ifndef klp_get_ftrace_location -static unsigned long klp_get_ftrace_location(unsigned long faddr) -{ - return faddr; -} -#endif - -static void klp_unpatch_func(struct klp_func *func) -{ - struct klp_ops *ops; - - if (WARN_ON(!func->patched)) - return; - if (WARN_ON(!func->old_addr)) - return; - - ops = klp_find_ops(func->old_addr); - if (WARN_ON(!ops)) - return; - - if (list_is_singular(&ops->func_stack)) { - unsigned long ftrace_loc; - - ftrace_loc = klp_get_ftrace_location(func->old_addr); - if (WARN_ON(!ftrace_loc)) - return; - - WARN_ON(unregister_ftrace_function(&ops->fops)); - WARN_ON(ftrace_set_filter_ip(&ops->fops, ftrace_loc, 1, 0)); - - list_del_rcu(&func->stack_node); - list_del(&ops->node); - kfree(ops); - } else { - list_del_rcu(&func->stack_node); - } - - func->patched = false; -} - -static int klp_patch_func(struct klp_func *func) -{ - struct klp_ops *ops; - int ret; - - if (WARN_ON(!func->old_addr)) - return -EINVAL; - - if (WARN_ON(func->patched)) - return -EINVAL; - - ops = klp_find_ops(func->old_addr); - if (!ops) { - unsigned long ftrace_loc; - - ftrace_loc = klp_get_ftrace_location(func->old_addr); - if (!ftrace_loc) { - pr_err("failed to find location for function '%s'\n", - func->old_name); - return -EINVAL; - } - - ops = kzalloc(sizeof(*ops), GFP_KERNEL); - if (!ops) - return -ENOMEM; - - ops->fops.func = klp_ftrace_handler; - ops->fops.flags = FTRACE_OPS_FL_SAVE_REGS | - FTRACE_OPS_FL_DYNAMIC | - FTRACE_OPS_FL_IPMODIFY; - - list_add(&ops->node, &klp_ops); - - INIT_LIST_HEAD(&ops->func_stack); - list_add_rcu(&func->stack_node, &ops->func_stack); - - ret = ftrace_set_filter_ip(&ops->fops, ftrace_loc, 0, 0); - if (ret) { - pr_err("failed to set ftrace filter for function '%s' (%d)\n", - func->old_name, ret); - goto err; - } - - ret = register_ftrace_function(&ops->fops); - if (ret) { - pr_err("failed to register ftrace handler for function '%s' (%d)\n", - func->old_name, ret); - ftrace_set_filter_ip(&ops->fops, ftrace_loc, 1, 0); - goto err; - } - - - } else { - list_add_rcu(&func->stack_node, &ops->func_stack); - } - - func->patched = true; - - return 0; - -err: - list_del_rcu(&func->stack_node); - list_del(&ops->node); - kfree(ops); - return ret; -} - -static void klp_unpatch_object(struct klp_object *obj) -{ - struct klp_func *func; - - klp_for_each_func(obj, func) - if (func->patched) - klp_unpatch_func(func); - - obj->patched = false; -} - -static int klp_patch_object(struct klp_object *obj) -{ - struct klp_func *func; - int ret; - - if (WARN_ON(obj->patched)) - return -EINVAL; - - klp_for_each_func(obj, func) { - ret = klp_patch_func(func); - if (ret) { - klp_unpatch_object(obj); - return ret; - } - } - obj->patched = true; - - return 0; -} - static int __klp_disable_patch(struct klp_patch *patch) { struct klp_object *obj; diff --git a/kernel/livepatch/patch.c b/kernel/livepatch/patch.c new file mode 100644 index 000000000000..5efa2620851a --- /dev/null +++ b/kernel/livepatch/patch.c @@ -0,0 +1,213 @@ +/* + * patch.c - livepatch patching functions + * + * Copyright (C) 2014 Seth Jennings + * Copyright (C) 2014 SUSE + * Copyright (C) 2015 Josh Poimboeuf + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version 2 + * of the License, or (at your option) any later version. 
+ * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, see . + */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include +#include +#include +#include +#include +#include +#include +#include "patch.h" + +static LIST_HEAD(klp_ops); + +struct klp_ops *klp_find_ops(unsigned long old_addr) +{ + struct klp_ops *ops; + struct klp_func *func; + + list_for_each_entry(ops, &klp_ops, node) { + func = list_first_entry(&ops->func_stack, struct klp_func, + stack_node); + if (func->old_addr == old_addr) + return ops; + } + + return NULL; +} + +static void notrace klp_ftrace_handler(unsigned long ip, + unsigned long parent_ip, + struct ftrace_ops *fops, + struct pt_regs *regs) +{ + struct klp_ops *ops; + struct klp_func *func; + + ops = container_of(fops, struct klp_ops, fops); + + rcu_read_lock(); + func = list_first_or_null_rcu(&ops->func_stack, struct klp_func, + stack_node); + if (WARN_ON_ONCE(!func)) + goto unlock; + + klp_arch_set_pc(regs, (unsigned long)func->new_func); +unlock: + rcu_read_unlock(); +} + +/* + * Convert a function address into the appropriate ftrace location. + * + * Usually this is just the address of the function, but on some architectures + * it's more complicated so allow them to provide a custom behaviour. + */ +#ifndef klp_get_ftrace_location +static unsigned long klp_get_ftrace_location(unsigned long faddr) +{ + return faddr; +} +#endif + +static void klp_unpatch_func(struct klp_func *func) +{ + struct klp_ops *ops; + + if (WARN_ON(!func->patched)) + return; + if (WARN_ON(!func->old_addr)) + return; + + ops = klp_find_ops(func->old_addr); + if (WARN_ON(!ops)) + return; + + if (list_is_singular(&ops->func_stack)) { + unsigned long ftrace_loc; + + ftrace_loc = klp_get_ftrace_location(func->old_addr); + if (WARN_ON(!ftrace_loc)) + return; + + WARN_ON(unregister_ftrace_function(&ops->fops)); + WARN_ON(ftrace_set_filter_ip(&ops->fops, ftrace_loc, 1, 0)); + + list_del_rcu(&func->stack_node); + list_del(&ops->node); + kfree(ops); + } else { + list_del_rcu(&func->stack_node); + } + + func->patched = false; +} + +static int klp_patch_func(struct klp_func *func) +{ + struct klp_ops *ops; + int ret; + + if (WARN_ON(!func->old_addr)) + return -EINVAL; + + if (WARN_ON(func->patched)) + return -EINVAL; + + ops = klp_find_ops(func->old_addr); + if (!ops) { + unsigned long ftrace_loc; + + ftrace_loc = klp_get_ftrace_location(func->old_addr); + if (!ftrace_loc) { + pr_err("failed to find location for function '%s'\n", + func->old_name); + return -EINVAL; + } + + ops = kzalloc(sizeof(*ops), GFP_KERNEL); + if (!ops) + return -ENOMEM; + + ops->fops.func = klp_ftrace_handler; + ops->fops.flags = FTRACE_OPS_FL_SAVE_REGS | + FTRACE_OPS_FL_DYNAMIC | + FTRACE_OPS_FL_IPMODIFY; + + list_add(&ops->node, &klp_ops); + + INIT_LIST_HEAD(&ops->func_stack); + list_add_rcu(&func->stack_node, &ops->func_stack); + + ret = ftrace_set_filter_ip(&ops->fops, ftrace_loc, 0, 0); + if (ret) { + pr_err("failed to set ftrace filter for function '%s' (%d)\n", + func->old_name, ret); + goto err; + } + + ret = register_ftrace_function(&ops->fops); + if (ret) { + pr_err("failed to register ftrace handler for function '%s' (%d)\n", + func->old_name, ret); + ftrace_set_filter_ip(&ops->fops, ftrace_loc, 1, 0); + goto 
err; + } + + + } else { + list_add_rcu(&func->stack_node, &ops->func_stack); + } + + func->patched = true; + + return 0; + +err: + list_del_rcu(&func->stack_node); + list_del(&ops->node); + kfree(ops); + return ret; +} + +void klp_unpatch_object(struct klp_object *obj) +{ + struct klp_func *func; + + klp_for_each_func(obj, func) + if (func->patched) + klp_unpatch_func(func); + + obj->patched = false; +} + +int klp_patch_object(struct klp_object *obj) +{ + struct klp_func *func; + int ret; + + if (WARN_ON(obj->patched)) + return -EINVAL; + + klp_for_each_func(obj, func) { + ret = klp_patch_func(func); + if (ret) { + klp_unpatch_object(obj); + return ret; + } + } + obj->patched = true; + + return 0; +} diff --git a/kernel/livepatch/patch.h b/kernel/livepatch/patch.h new file mode 100644 index 000000000000..2d0cce02dade --- /dev/null +++ b/kernel/livepatch/patch.h @@ -0,0 +1,32 @@ +#ifndef _LIVEPATCH_PATCH_H +#define _LIVEPATCH_PATCH_H + +#include +#include +#include + +/** + * struct klp_ops - structure for tracking registered ftrace ops structs + * + * A single ftrace_ops is shared between all enabled replacement functions + * (klp_func structs) which have the same old_addr. This allows the switch + * between function versions to happen instantaneously by updating the klp_ops + * struct's func_stack list. The winner is the klp_func at the top of the + * func_stack (front of the list). + * + * @node: node for the global klp_ops list + * @func_stack: list head for the stack of klp_func's (active func is on top) + * @fops: registered ftrace ops struct + */ +struct klp_ops { + struct list_head node; + struct list_head func_stack; + struct ftrace_ops fops; +}; + +struct klp_ops *klp_find_ops(unsigned long old_addr); + +int klp_patch_object(struct klp_object *obj); +void klp_unpatch_object(struct klp_object *obj); + +#endif /* _LIVEPATCH_PATCH_H */ From 68ae4b2b687c3da59ca1d762646ddece4ea1c438 Mon Sep 17 00:00:00 2001 From: Josh Poimboeuf Date: Mon, 13 Feb 2017 19:42:38 -0600 Subject: [PATCH 11/19] livepatch: use kstrtobool() in enabled_store() The sysfs enabled value is a boolean, so kstrtobool() is a better fit for parsing the input string since it does the range checking for us. Suggested-by: Petr Mladek Signed-off-by: Josh Poimboeuf Acked-by: Miroslav Benes Reviewed-by: Petr Mladek Signed-off-by: Jiri Kosina --- kernel/livepatch/core.c | 13 +++++-------- 1 file changed, 5 insertions(+), 8 deletions(-) diff --git a/kernel/livepatch/core.c b/kernel/livepatch/core.c index 6a137e1f4490..83c4949862b4 100644 --- a/kernel/livepatch/core.c +++ b/kernel/livepatch/core.c @@ -408,26 +408,23 @@ static ssize_t enabled_store(struct kobject *kobj, struct kobj_attribute *attr, { struct klp_patch *patch; int ret; - unsigned long val; + bool enabled; - ret = kstrtoul(buf, 10, &val); + ret = kstrtobool(buf, &enabled); if (ret) - return -EINVAL; - - if (val > 1) - return -EINVAL; + return ret; patch = container_of(kobj, struct klp_patch, kobj); mutex_lock(&klp_mutex); - if (patch->enabled == val) { + if (patch->enabled == enabled) { /* already in requested state */ ret = -EINVAL; goto err; } - if (val) { + if (enabled) { ret = __klp_enable_patch(patch); if (ret) goto err; From f5e547f4ac785c65a39211f0b8e4ffc4fe09112d Mon Sep 17 00:00:00 2001 From: Josh Poimboeuf Date: Mon, 13 Feb 2017 19:42:39 -0600 Subject: [PATCH 12/19] livepatch: store function sizes For the consistency model we'll need to know the sizes of the old and new functions to determine if they're on the stacks of any tasks. 
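A sketch of the address-range check these sizes enable (an assumption:
simplified from the stack-checking code added later in this series):

	#include <linux/livepatch.h>

	/*
	 * Does a return address from a task's stack trace land inside
	 * the old (or, while transitioning, the new) version of the
	 * function?
	 */
	static bool klp_func_in_range(struct klp_func *func,
				      unsigned long addr, bool check_new)
	{
		unsigned long start, size;

		if (check_new) {
			start = (unsigned long)func->new_func;
			size = func->new_size;
		} else {
			start = func->old_addr;
			size = func->old_size;
		}

		return addr >= start && addr < start + size;
	}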
Signed-off-by: Josh Poimboeuf Acked-by: Miroslav Benes Reviewed-by: Petr Mladek Reviewed-by: Kamalesh Babulal Signed-off-by: Jiri Kosina --- include/linux/livepatch.h | 3 +++ kernel/livepatch/core.c | 16 ++++++++++++++++ 2 files changed, 19 insertions(+) diff --git a/include/linux/livepatch.h b/include/linux/livepatch.h index 9787a63b57ac..6602b34bed2b 100644 --- a/include/linux/livepatch.h +++ b/include/linux/livepatch.h @@ -37,6 +37,8 @@ * @old_addr: the address of the function being patched * @kobj: kobject for sysfs resources * @stack_node: list node for klp_ops func_stack list + * @old_size: size of the old function + * @new_size: size of the new function * @patched: the func has been added to the klp_ops list */ struct klp_func { @@ -56,6 +58,7 @@ struct klp_func { unsigned long old_addr; struct kobject kobj; struct list_head stack_node; + unsigned long old_size, new_size; bool patched; }; diff --git a/kernel/livepatch/core.c b/kernel/livepatch/core.c index 83c4949862b4..10ba3a1578bd 100644 --- a/kernel/livepatch/core.c +++ b/kernel/livepatch/core.c @@ -584,6 +584,22 @@ static int klp_init_object_loaded(struct klp_patch *patch, &func->old_addr); if (ret) return ret; + + ret = kallsyms_lookup_size_offset(func->old_addr, + &func->old_size, NULL); + if (!ret) { + pr_err("kallsyms size lookup failed for '%s'\n", + func->old_name); + return -ENOENT; + } + + ret = kallsyms_lookup_size_offset((unsigned long)func->new_func, + &func->new_size, NULL); + if (!ret) { + pr_err("kallsyms size lookup failed for '%s' replacement\n", + func->old_name); + return -ENOENT; + } } return 0; From d83a7cb375eec21f04c83542395d08b2f6641da2 Mon Sep 17 00:00:00 2001 From: Josh Poimboeuf Date: Mon, 13 Feb 2017 19:42:40 -0600 Subject: [PATCH 13/19] livepatch: change to a per-task consistency model Change livepatch to use a basic per-task consistency model. This is the foundation which will eventually enable us to patch those ~10% of security patches which change function or data semantics. This is the biggest remaining piece needed to make livepatch more generally useful. This code stems from the design proposal made by Vojtech [1] in November 2014. It's a hybrid of kGraft and kpatch: it uses kGraft's per-task consistency and syscall barrier switching combined with kpatch's stack trace switching. There are also a number of fallback options which make it quite flexible. Patches are applied on a per-task basis, when the task is deemed safe to switch over. When a patch is enabled, livepatch enters into a transition state where tasks are converging to the patched state. Usually this transition state can complete in a few seconds. The same sequence occurs when a patch is disabled, except the tasks converge from the patched state to the unpatched state. An interrupt handler inherits the patched state of the task it interrupts. The same is true for forked tasks: the child inherits the patched state of the parent. Livepatch uses several complementary approaches to determine when it's safe to patch tasks: 1. The first and most effective approach is stack checking of sleeping tasks. If no affected functions are on the stack of a given task, the task is patched. In most cases this will patch most or all of the tasks on the first try. Otherwise it'll keep trying periodically. This option is only available if the architecture has reliable stacks (HAVE_RELIABLE_STACKTRACE). 2. The second approach, if needed, is kernel exit switching. 
A task is switched when it returns to user space from a system call, a user space IRQ, or a signal. It's useful in the following cases: a) Patching I/O-bound user tasks which are sleeping on an affected function. In this case you have to send SIGSTOP and SIGCONT to force it to exit the kernel and be patched. b) Patching CPU-bound user tasks. If the task is highly CPU-bound then it will get patched the next time it gets interrupted by an IRQ. c) In the future it could be useful for applying patches for architectures which don't yet have HAVE_RELIABLE_STACKTRACE. In this case you would have to signal most of the tasks on the system. However this isn't supported yet because there's currently no way to patch kthreads without HAVE_RELIABLE_STACKTRACE. 3. For idle "swapper" tasks, since they don't ever exit the kernel, they instead have a klp_update_patch_state() call in the idle loop which allows them to be patched before the CPU enters the idle state. (Note there's not yet such an approach for kthreads.) All the above approaches may be skipped by setting the 'immediate' flag in the 'klp_patch' struct, which will disable per-task consistency and patch all tasks immediately. This can be useful if the patch doesn't change any function or data semantics. Note that, even with this flag set, it's possible that some tasks may still be running with an old version of the function, until that function returns. There's also an 'immediate' flag in the 'klp_func' struct which allows you to specify that certain functions in the patch can be applied without per-task consistency. This might be useful if you want to patch a common function like schedule(), and the function change doesn't need consistency but the rest of the patch does. For architectures which don't have HAVE_RELIABLE_STACKTRACE, the user must set patch->immediate which causes all tasks to be patched immediately. This option should be used with care, only when the patch doesn't change any function or data semantics. In the future, architectures which don't have HAVE_RELIABLE_STACKTRACE may be allowed to use per-task consistency if we can come up with another way to patch kthreads. The /sys/kernel/livepatch//transition file shows whether a patch is in transition. Only a single patch (the topmost patch on the stack) can be in transition at a given time. A patch can remain in transition indefinitely, if any of the tasks are stuck in the initial patch state. A transition can be reversed and effectively canceled by writing the opposite value to the /sys/kernel/livepatch//enabled file while the transition is in progress. Then all the tasks will attempt to converge back to the original patch state. 
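At the core of the per-task switching, klp_update_patch_state() reduces
to roughly the following (simplified sketch; the real implementation in
transition.c adds the ordering and synchronization needed against a
concurrently starting transition):

	void klp_update_patch_state(struct task_struct *task)
	{
		/*
		 * TIF_PATCH_PENDING is only set while a transition is
		 * in progress; clearing it and adopting the global
		 * target state (KLP_PATCHED or KLP_UNPATCHED) switches
		 * this one task.
		 */
		if (test_and_clear_tsk_thread_flag(task, TIF_PATCH_PENDING))
			task->patch_state = READ_ONCE(klp_target_state);
	}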
[1] https://lkml.kernel.org/r/20141107140458.GA21774@suse.cz Signed-off-by: Josh Poimboeuf Acked-by: Miroslav Benes Acked-by: Ingo Molnar # for the scheduler changes Signed-off-by: Jiri Kosina --- .../ABI/testing/sysfs-kernel-livepatch | 8 + Documentation/livepatch/livepatch.txt | 180 +++++- include/linux/init_task.h | 9 + include/linux/livepatch.h | 42 +- include/linux/sched.h | 3 + kernel/fork.c | 3 + kernel/livepatch/Makefile | 2 +- kernel/livepatch/core.c | 105 +++- kernel/livepatch/patch.c | 59 ++ kernel/livepatch/patch.h | 1 + kernel/livepatch/transition.c | 543 ++++++++++++++++++ kernel/livepatch/transition.h | 14 + kernel/sched/idle.c | 4 + samples/livepatch/livepatch-sample.c | 17 + 14 files changed, 944 insertions(+), 46 deletions(-) create mode 100644 kernel/livepatch/transition.c create mode 100644 kernel/livepatch/transition.h diff --git a/Documentation/ABI/testing/sysfs-kernel-livepatch b/Documentation/ABI/testing/sysfs-kernel-livepatch index da87f43aec58..d5d39748382f 100644 --- a/Documentation/ABI/testing/sysfs-kernel-livepatch +++ b/Documentation/ABI/testing/sysfs-kernel-livepatch @@ -25,6 +25,14 @@ Description: code is currently applied. Writing 0 will disable the patch while writing 1 will re-enable the patch. +What: /sys/kernel/livepatch//transition +Date: Feb 2017 +KernelVersion: 4.12.0 +Contact: live-patching@vger.kernel.org +Description: + An attribute which indicates whether the patch is currently in + transition. + What: /sys/kernel/livepatch// Date: Nov 2014 KernelVersion: 3.19.0 diff --git a/Documentation/livepatch/livepatch.txt b/Documentation/livepatch/livepatch.txt index 9d2096c7160d..4f2aec8d4c12 100644 --- a/Documentation/livepatch/livepatch.txt +++ b/Documentation/livepatch/livepatch.txt @@ -72,7 +72,8 @@ example, they add a NULL pointer or a boundary check, fix a race by adding a missing memory barrier, or add some locking around a critical section. Most of these changes are self contained and the function presents itself the same way to the rest of the system. In this case, the functions might -be updated independently one by one. +be updated independently one by one. (This can be done by setting the +'immediate' flag in the klp_patch struct.) But there are more complex fixes. For example, a patch might change ordering of locking in multiple functions at the same time. Or a patch @@ -86,20 +87,141 @@ or no data are stored in the modified structures at the moment. The theory about how to apply functions a safe way is rather complex. The aim is to define a so-called consistency model. It attempts to define conditions when the new implementation could be used so that the system -stays consistent. The theory is not yet finished. See the discussion at -https://lkml.kernel.org/r/20141107140458.GA21774@suse.cz +stays consistent. -The current consistency model is very simple. It guarantees that either -the old or the new function is called. But various functions get redirected -one by one without any synchronization. +Livepatch has a consistency model which is a hybrid of kGraft and +kpatch: it uses kGraft's per-task consistency and syscall barrier +switching combined with kpatch's stack trace switching. There are also +a number of fallback options which make it quite flexible. -In other words, the current implementation _never_ modifies the behavior -in the middle of the call. It is because it does _not_ rewrite the entire -function in the memory. Instead, the function gets redirected at the -very beginning. 
But this redirection is used immediately even when -some other functions from the same patch have not been redirected yet. +Patches are applied on a per-task basis, when the task is deemed safe to +switch over. When a patch is enabled, livepatch enters into a +transition state where tasks are converging to the patched state. +Usually this transition state can complete in a few seconds. The same +sequence occurs when a patch is disabled, except the tasks converge from +the patched state to the unpatched state. -See also the section "Limitations" below. +An interrupt handler inherits the patched state of the task it +interrupts. The same is true for forked tasks: the child inherits the +patched state of the parent. + +Livepatch uses several complementary approaches to determine when it's +safe to patch tasks: + +1. The first and most effective approach is stack checking of sleeping + tasks. If no affected functions are on the stack of a given task, + the task is patched. In most cases this will patch most or all of + the tasks on the first try. Otherwise it'll keep trying + periodically. This option is only available if the architecture has + reliable stacks (HAVE_RELIABLE_STACKTRACE). + +2. The second approach, if needed, is kernel exit switching. A + task is switched when it returns to user space from a system call, a + user space IRQ, or a signal. It's useful in the following cases: + + a) Patching I/O-bound user tasks which are sleeping on an affected + function. In this case you have to send SIGSTOP and SIGCONT to + force it to exit the kernel and be patched. + b) Patching CPU-bound user tasks. If the task is highly CPU-bound + then it will get patched the next time it gets interrupted by an + IRQ. + c) In the future it could be useful for applying patches for + architectures which don't yet have HAVE_RELIABLE_STACKTRACE. In + this case you would have to signal most of the tasks on the + system. However this isn't supported yet because there's + currently no way to patch kthreads without + HAVE_RELIABLE_STACKTRACE. + +3. For idle "swapper" tasks, since they don't ever exit the kernel, they + instead have a klp_update_patch_state() call in the idle loop which + allows them to be patched before the CPU enters the idle state. + + (Note there's not yet such an approach for kthreads.) + +All the above approaches may be skipped by setting the 'immediate' flag +in the 'klp_patch' struct, which will disable per-task consistency and +patch all tasks immediately. This can be useful if the patch doesn't +change any function or data semantics. Note that, even with this flag +set, it's possible that some tasks may still be running with an old +version of the function, until that function returns. + +There's also an 'immediate' flag in the 'klp_func' struct which allows +you to specify that certain functions in the patch can be applied +without per-task consistency. This might be useful if you want to patch +a common function like schedule(), and the function change doesn't need +consistency but the rest of the patch does. + +For architectures which don't have HAVE_RELIABLE_STACKTRACE, the user +must set patch->immediate which causes all tasks to be patched +immediately. This option should be used with care, only when the patch +doesn't change any function or data semantics. + +In the future, architectures which don't have HAVE_RELIABLE_STACKTRACE +may be allowed to use per-task consistency if we can come up with +another way to patch kthreads. 
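Section 3.1 below (option 2) describes giving kthreads a designated patch point; such calls do not exist yet in this series. As a rough sketch only of what that could look like — the kthread and its work function are hypothetical, and only klp_patch_pending()/klp_update_patch_state() come from this patch set (mirroring the do_idle() hunk further down):

#include <linux/kthread.h>
#include <linux/livepatch.h>
#include <linux/sched.h>

/* hypothetical kthread with a custom loop and a designated patch point */
static int example_kthread_fn(void *data)
{
	while (!kthread_should_stop()) {
		example_do_work(data);	/* placeholder: one self-contained unit of work */

		/*
		 * Designated patch point: no locks are held and all data
		 * structures are in a well-defined state, so it would be
		 * safe to switch this task's patch state here.
		 */
		if (unlikely(klp_patch_pending(current)))
			klp_update_patch_state(current);

		schedule_timeout_interruptible(HZ);
	}
	return 0;
}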
+ +The /sys/kernel/livepatch/<patch>/transition file shows whether a patch +is in transition. Only a single patch (the topmost patch on the stack) +can be in transition at a given time. A patch can remain in transition +indefinitely, if any of the tasks are stuck in the initial patch state. + +A transition can be reversed and effectively canceled by writing the +opposite value to the /sys/kernel/livepatch/<patch>/enabled file while +the transition is in progress. Then all the tasks will attempt to +converge back to the original patch state. + +There's also a /proc/<pid>/patch_state file which can be used to +determine which tasks are blocking completion of a patching operation. +If a patch is in transition, this file shows 0 to indicate the task is +unpatched and 1 to indicate it's patched. Otherwise, if no patch is in +transition, it shows -1. Any tasks which are blocking the transition +can be signaled with SIGSTOP and SIGCONT to force them to change their +patched state. + + +3.1 Adding consistency model support to new architectures +--------------------------------------------------------- + +For adding consistency model support to new architectures, there are a +few options: + +1) Add CONFIG_HAVE_RELIABLE_STACKTRACE. This means porting objtool, and + for non-DWARF unwinders, also making sure there's a way for the stack + tracing code to detect interrupts on the stack. + +2) Alternatively, ensure that every kthread has a call to + klp_update_patch_state() in a safe location. Kthreads are typically + in an infinite loop which does some action repeatedly. The safe + location to switch the kthread's patch state would be at a designated + point in the loop where there are no locks taken and all data + structures are in a well-defined state. + + The location is clear when using workqueues or the kthread worker + API. These kthreads process independent actions in a generic loop. + + It's much more complicated with kthreads which have a custom loop. + There the safe location must be carefully selected on a case-by-case + basis. + + In that case, arches without HAVE_RELIABLE_STACKTRACE would still be + able to use the non-stack-checking parts of the consistency model: + + a) patching user tasks when they cross the kernel/user space + boundary; and + + b) patching kthreads and idle tasks at their designated patch points. + + This option isn't as good as option 1 because it requires signaling + user tasks and waking kthreads to patch them. But it could still be + a good backup option for those architectures which don't have + reliable stack traces yet. + +In the meantime, patches for such architectures can bypass the +consistency model by setting klp_patch.immediate to true. This option +is perfectly fine for patches which don't change the semantics of the +patched functions. In practice, this is usable for ~90% of security +fixes. Use of this option also means the patch can't be unloaded after +it has been disabled. 4. Livepatch module @@ -134,7 +256,7 @@ Documentation/livepatch/module-elf-format.txt for more details. 4.2. Metadata ------------- +------------- The patch is described by several structures that split the information into three levels: @@ -156,6 +278,9 @@ into three levels: only for a particular object ( vmlinux or a kernel module ). Note that kallsyms allows for searching symbols according to the object name. + There's also an 'immediate' flag which, when set, patches the + function immediately, bypassing the consistency model safety checks.
+ + + struct klp_object defines an array of patched functions (struct klp_func) in the same object. Where the object is either vmlinux (NULL) or a module name. @@ -172,10 +297,13 @@ into three levels: This structure handles all patched functions consistently and eventually, synchronously. The whole patch is applied only when all patched symbols are found. The only exception are symbols from objects - (kernel modules) that have not been loaded yet. Also if a more complex - consistency model is supported then a selected unit (thread, - kernel as a whole) will see the new code from the entire patch - only when it is in a safe state. + (kernel modules) that have not been loaded yet. + + Setting the 'immediate' flag applies the patch to all tasks + immediately, bypassing the consistency model safety checks. + + For more details on how the patch is applied on a per-task basis, + see the "Consistency model" section. 4.3. Livepatch module handling @@ -239,9 +367,15 @@ Registered patches might be enabled either by calling klp_enable_patch() or by writing '1' to /sys/kernel/livepatch/<patch>/enabled. The system will start using the new implementation of the patched functions at this stage. -In particular, if an original function is patched for the first time, a -function specific struct klp_ops is created and an universal ftrace handler -is registered. +When a patch is enabled, livepatch enters into a transition state where +tasks are converging to the patched state. This is indicated by a value +of '1' in /sys/kernel/livepatch/<patch>/transition. Once all tasks have +been patched, the 'transition' value changes to '0'. For more +information about this process, see the "Consistency model" section. + +If an original function is patched for the first time, a function +specific struct klp_ops is created and a universal ftrace handler is +registered. Functions might be patched multiple times. The ftrace handler is registered only once for the given function. Further patches just add an entry to the @@ -261,6 +395,12 @@ by writing '0' to /sys/kernel/livepatch/<patch>/enabled. At this stage either the code from the previously enabled patch or even the original code gets used. +When a patch is disabled, livepatch enters into a transition state where +tasks are converging to the unpatched state. This is indicated by a +value of '1' in /sys/kernel/livepatch/<patch>/transition. Once all tasks +have been unpatched, the 'transition' value changes to '0'. For more +information about this process, see the "Consistency model" section. + Here all the functions (struct klp_func) associated with the to-be-disabled patch are removed from the corresponding struct klp_ops.
The ftrace handler is unregistered and the struct klp_ops is freed when the func_stack list diff --git a/include/linux/init_task.h b/include/linux/init_task.h index 91d9049f0039..5a791055b176 100644 --- a/include/linux/init_task.h +++ b/include/linux/init_task.h @@ -15,6 +15,7 @@ #include #include #include +#include #include #include @@ -202,6 +203,13 @@ extern struct cred init_cred; # define INIT_KASAN(tsk) #endif +#ifdef CONFIG_LIVEPATCH +# define INIT_LIVEPATCH(tsk) \ + .patch_state = KLP_UNDEFINED, +#else +# define INIT_LIVEPATCH(tsk) +#endif + #ifdef CONFIG_THREAD_INFO_IN_TASK # define INIT_TASK_TI(tsk) \ .thread_info = INIT_THREAD_INFO(tsk), \ @@ -288,6 +296,7 @@ extern struct cred init_cred; INIT_VTIME(tsk) \ INIT_NUMA_BALANCING(tsk) \ INIT_KASAN(tsk) \ + INIT_LIVEPATCH(tsk) \ } diff --git a/include/linux/livepatch.h b/include/linux/livepatch.h index 6602b34bed2b..ed90ad1605c1 100644 --- a/include/linux/livepatch.h +++ b/include/linux/livepatch.h @@ -28,18 +28,40 @@ #include +/* task patch states */ +#define KLP_UNDEFINED -1 +#define KLP_UNPATCHED 0 +#define KLP_PATCHED 1 + /** * struct klp_func - function structure for live patching * @old_name: name of the function to be patched * @new_func: pointer to the patched function code * @old_sympos: a hint indicating which symbol position the old function * can be found (optional) + * @immediate: patch the func immediately, bypassing safety mechanisms * @old_addr: the address of the function being patched * @kobj: kobject for sysfs resources * @stack_node: list node for klp_ops func_stack list * @old_size: size of the old function * @new_size: size of the new function * @patched: the func has been added to the klp_ops list + * @transition: the func is currently being applied or reverted + * + * The patched and transition variables define the func's patching state. When + * patching, a func is always in one of the following states: + * + * patched=0 transition=0: unpatched + * patched=0 transition=1: unpatched, temporary starting state + * patched=1 transition=1: patched, may be visible to some tasks + * patched=1 transition=0: patched, visible to all tasks + * + * And when unpatching, it goes in the reverse order: + * + * patched=1 transition=0: patched, visible to all tasks + * patched=1 transition=1: patched, may be visible to some tasks + * patched=0 transition=1: unpatched, temporary ending state + * patched=0 transition=0: unpatched */ struct klp_func { /* external */ @@ -53,6 +75,7 @@ struct klp_func { * in kallsyms for the given object is used. 
*/ unsigned long old_sympos; + bool immediate; /* internal */ unsigned long old_addr; @@ -60,6 +83,7 @@ struct klp_func { struct list_head stack_node; unsigned long old_size, new_size; bool patched; + bool transition; }; /** @@ -68,7 +92,7 @@ struct klp_func { * @funcs: function entries for functions to be patched in the object * @kobj: kobject for sysfs resources * @mod: kernel module associated with the patched object - * (NULL for vmlinux) + * (NULL for vmlinux) * @patched: the object's funcs have been added to the klp_ops list */ struct klp_object { @@ -86,6 +110,7 @@ struct klp_object { * struct klp_patch - patch structure for live patching * @mod: reference to the live patch module * @objs: object entries for kernel objects to be patched + * @immediate: patch all funcs immediately, bypassing safety mechanisms * @list: list node for global list of registered patches * @kobj: kobject for sysfs resources * @enabled: the patch is enabled (but operation may be incomplete) @@ -94,6 +119,7 @@ struct klp_patch { /* external */ struct module *mod; struct klp_object *objs; + bool immediate; /* internal */ struct list_head list; @@ -121,13 +147,27 @@ void arch_klp_init_object_loaded(struct klp_patch *patch, int klp_module_coming(struct module *mod); void klp_module_going(struct module *mod); +void klp_copy_process(struct task_struct *child); void klp_update_patch_state(struct task_struct *task); +static inline bool klp_patch_pending(struct task_struct *task) +{ + return test_tsk_thread_flag(task, TIF_PATCH_PENDING); +} + +static inline bool klp_have_reliable_stack(void) +{ + return IS_ENABLED(CONFIG_STACKTRACE) && + IS_ENABLED(CONFIG_HAVE_RELIABLE_STACKTRACE); +} + #else /* !CONFIG_LIVEPATCH */ static inline int klp_module_coming(struct module *mod) { return 0; } static inline void klp_module_going(struct module *mod) {} +static inline bool klp_patch_pending(struct task_struct *task) { return false; } static inline void klp_update_patch_state(struct task_struct *task) {} +static inline void klp_copy_process(struct task_struct *child) {} #endif /* CONFIG_LIVEPATCH */ diff --git a/include/linux/sched.h b/include/linux/sched.h index d67eee84fd43..e11032010318 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -1037,6 +1037,9 @@ struct task_struct { #ifdef CONFIG_THREAD_INFO_IN_TASK /* A live task holds one reference: */ atomic_t stack_refcount; +#endif +#ifdef CONFIG_LIVEPATCH + int patch_state; #endif /* CPU-specific state of this task: */ struct thread_struct thread; diff --git a/kernel/fork.c b/kernel/fork.c index 6c463c80e93d..942cbcd07c18 100644 --- a/kernel/fork.c +++ b/kernel/fork.c @@ -87,6 +87,7 @@ #include #include #include +#include #include #include @@ -1797,6 +1798,8 @@ static __latent_entropy struct task_struct *copy_process( p->parent_exec_id = current->self_exec_id; } + klp_copy_process(p); + spin_lock(¤t->sighand->siglock); /* diff --git a/kernel/livepatch/Makefile b/kernel/livepatch/Makefile index e136dad8ff7e..2b8bdb1925da 100644 --- a/kernel/livepatch/Makefile +++ b/kernel/livepatch/Makefile @@ -1,3 +1,3 @@ obj-$(CONFIG_LIVEPATCH) += livepatch.o -livepatch-objs := core.o patch.o +livepatch-objs := core.o patch.o transition.o diff --git a/kernel/livepatch/core.c b/kernel/livepatch/core.c index 10ba3a1578bd..3dc3c9049690 100644 --- a/kernel/livepatch/core.c +++ b/kernel/livepatch/core.c @@ -31,22 +31,22 @@ #include #include #include "patch.h" +#include "transition.h" /* - * The klp_mutex protects the global lists and state transitions of any - * structure reachable 
from them. References to any structure must be obtained - * under mutex protection (except in klp_ftrace_handler(), which uses RCU to - * ensure it gets consistent data). + * klp_mutex is a coarse lock which serializes access to klp data. All + * accesses to klp-related variables and structures must have mutex protection, + * except within the following functions which carefully avoid the need for it: + * + * - klp_ftrace_handler() + * - klp_update_patch_state() */ -static DEFINE_MUTEX(klp_mutex); +DEFINE_MUTEX(klp_mutex); static LIST_HEAD(klp_patches); static struct kobject *klp_root_kobj; -/* TODO: temporary stub */ -void klp_update_patch_state(struct task_struct *task) {} - static bool klp_is_module(struct klp_object *obj) { return obj->name; @@ -85,7 +85,6 @@ static void klp_find_object_module(struct klp_object *obj) mutex_unlock(&module_mutex); } -/* klp_mutex must be held by caller */ static bool klp_is_patch_registered(struct klp_patch *patch) { struct klp_patch *mypatch; @@ -281,20 +280,27 @@ static int klp_write_object_relocations(struct module *pmod, static int __klp_disable_patch(struct klp_patch *patch) { - struct klp_object *obj; + if (klp_transition_patch) + return -EBUSY; /* enforce stacking: only the last enabled patch can be disabled */ if (!list_is_last(&patch->list, &klp_patches) && list_next_entry(patch, list)->enabled) return -EBUSY; - pr_notice("disabling patch '%s'\n", patch->mod->name); + klp_init_transition(patch, KLP_UNPATCHED); - klp_for_each_object(patch, obj) { - if (obj->patched) - klp_unpatch_object(obj); - } + /* + * Enforce the order of the func->transition writes in + * klp_init_transition() and the TIF_PATCH_PENDING writes in + * klp_start_transition(). In the rare case where klp_ftrace_handler() + * is called shortly after klp_update_patch_state() switches the task, + * this ensures the handler sees that func->transition is set. + */ + smp_wmb(); + klp_start_transition(); + klp_try_complete_transition(); patch->enabled = false; return 0; @@ -337,6 +343,9 @@ static int __klp_enable_patch(struct klp_patch *patch) struct klp_object *obj; int ret; + if (klp_transition_patch) + return -EBUSY; + if (WARN_ON(patch->enabled)) return -EINVAL; @@ -347,22 +356,36 @@ static int __klp_enable_patch(struct klp_patch *patch) pr_notice("enabling patch '%s'\n", patch->mod->name); + klp_init_transition(patch, KLP_PATCHED); + + /* + * Enforce the order of the func->transition writes in + * klp_init_transition() and the ops->func_stack writes in + * klp_patch_object(), so that klp_ftrace_handler() will see the + * func->transition updates before the handler is registered and the + * new funcs become visible to the handler. 
+ */ + smp_wmb(); + klp_for_each_object(patch, obj) { if (!klp_is_object_loaded(obj)) continue; ret = klp_patch_object(obj); - if (ret) - goto unregister; + if (ret) { + pr_warn("failed to enable patch '%s'\n", + patch->mod->name); + + klp_cancel_transition(); + return ret; + } } + klp_start_transition(); + klp_try_complete_transition(); patch->enabled = true; return 0; - -unregister: - WARN_ON(__klp_disable_patch(patch)); - return ret; } /** @@ -399,6 +422,7 @@ EXPORT_SYMBOL_GPL(klp_enable_patch); * /sys/kernel/livepatch * /sys/kernel/livepatch/ * /sys/kernel/livepatch//enabled + * /sys/kernel/livepatch//transition * /sys/kernel/livepatch// * /sys/kernel/livepatch/// */ @@ -424,7 +448,9 @@ static ssize_t enabled_store(struct kobject *kobj, struct kobj_attribute *attr, goto err; } - if (enabled) { + if (patch == klp_transition_patch) { + klp_reverse_transition(); + } else if (enabled) { ret = __klp_enable_patch(patch); if (ret) goto err; @@ -452,9 +478,21 @@ static ssize_t enabled_show(struct kobject *kobj, return snprintf(buf, PAGE_SIZE-1, "%d\n", patch->enabled); } +static ssize_t transition_show(struct kobject *kobj, + struct kobj_attribute *attr, char *buf) +{ + struct klp_patch *patch; + + patch = container_of(kobj, struct klp_patch, kobj); + return snprintf(buf, PAGE_SIZE-1, "%d\n", + patch == klp_transition_patch); +} + static struct kobj_attribute enabled_kobj_attr = __ATTR_RW(enabled); +static struct kobj_attribute transition_kobj_attr = __ATTR_RO(transition); static struct attribute *klp_patch_attrs[] = { &enabled_kobj_attr.attr, + &transition_kobj_attr.attr, NULL }; @@ -544,6 +582,7 @@ static int klp_init_func(struct klp_object *obj, struct klp_func *func) INIT_LIST_HEAD(&func->stack_node); func->patched = false; + func->transition = false; /* The format for the sysfs directory is where sympos * is the nth occurrence of this symbol in kallsyms for the patched @@ -739,6 +778,16 @@ int klp_register_patch(struct klp_patch *patch) if (!klp_initialized()) return -ENODEV; + /* + * Architectures without reliable stack traces have to set + * patch->immediate because there's currently no way to patch kthreads + * with the consistency model. + */ + if (!klp_have_reliable_stack() && !patch->immediate) { + pr_err("This architecture doesn't have support for the livepatch consistency model.\n"); + return -ENOSYS; + } + /* * A reference is taken on the patch module to prevent it from being * unloaded. Right now, we don't allow patch modules to unload since @@ -788,7 +837,11 @@ int klp_module_coming(struct module *mod) goto err; } - if (!patch->enabled) + /* + * Only patch the module if the patch is enabled or is + * in transition. + */ + if (!patch->enabled && patch != klp_transition_patch) break; pr_notice("applying patch '%s' to loading module '%s'\n", @@ -845,7 +898,11 @@ void klp_module_going(struct module *mod) if (!klp_is_module(obj) || strcmp(obj->name, mod->name)) continue; - if (patch->enabled) { + /* + * Only unpatch the module if the patch is enabled or + * is in transition. 
+ */ + if (patch->enabled || patch == klp_transition_patch) { pr_notice("reverting patch '%s' on unloading module '%s'\n", patch->mod->name, obj->mod->name); klp_unpatch_object(obj); diff --git a/kernel/livepatch/patch.c b/kernel/livepatch/patch.c index 5efa2620851a..f8269036bf0b 100644 --- a/kernel/livepatch/patch.c +++ b/kernel/livepatch/patch.c @@ -29,6 +29,7 @@ #include #include #include "patch.h" +#include "transition.h" static LIST_HEAD(klp_ops); @@ -54,15 +55,64 @@ static void notrace klp_ftrace_handler(unsigned long ip, { struct klp_ops *ops; struct klp_func *func; + int patch_state; ops = container_of(fops, struct klp_ops, fops); rcu_read_lock(); + func = list_first_or_null_rcu(&ops->func_stack, struct klp_func, stack_node); + + /* + * func should never be NULL because preemption should be disabled here + * and unregister_ftrace_function() does the equivalent of a + * synchronize_sched() before the func_stack removal. + */ if (WARN_ON_ONCE(!func)) goto unlock; + /* + * In the enable path, enforce the order of the ops->func_stack and + * func->transition reads. The corresponding write barrier is in + * __klp_enable_patch(). + * + * (Note that this barrier technically isn't needed in the disable + * path. In the rare case where klp_update_patch_state() runs before + * this handler, its TIF_PATCH_PENDING read and this func->transition + * read need to be ordered. But klp_update_patch_state() already + * enforces that.) + */ + smp_rmb(); + + if (unlikely(func->transition)) { + + /* + * Enforce the order of the func->transition and + * current->patch_state reads. Otherwise we could read an + * out-of-date task state and pick the wrong function. The + * corresponding write barrier is in klp_init_transition(). + */ + smp_rmb(); + + patch_state = current->patch_state; + + WARN_ON_ONCE(patch_state == KLP_UNDEFINED); + + if (patch_state == KLP_UNPATCHED) { + /* + * Use the previously patched version of the function. + * If no previous patches exist, continue with the + * original function. + */ + func = list_entry_rcu(func->stack_node.next, + struct klp_func, stack_node); + + if (&func->stack_node == &ops->func_stack) + goto unlock; + } + } + klp_arch_set_pc(regs, (unsigned long)func->new_func); unlock: rcu_read_unlock(); @@ -211,3 +261,12 @@ int klp_patch_object(struct klp_object *obj) return 0; } + +void klp_unpatch_objects(struct klp_patch *patch) +{ + struct klp_object *obj; + + klp_for_each_object(patch, obj) + if (obj->patched) + klp_unpatch_object(obj); +} diff --git a/kernel/livepatch/patch.h b/kernel/livepatch/patch.h index 2d0cce02dade..0db227170c36 100644 --- a/kernel/livepatch/patch.h +++ b/kernel/livepatch/patch.h @@ -28,5 +28,6 @@ struct klp_ops *klp_find_ops(unsigned long old_addr); int klp_patch_object(struct klp_object *obj); void klp_unpatch_object(struct klp_object *obj); +void klp_unpatch_objects(struct klp_patch *patch); #endif /* _LIVEPATCH_PATCH_H */ diff --git a/kernel/livepatch/transition.c b/kernel/livepatch/transition.c new file mode 100644 index 000000000000..428533ec51b5 --- /dev/null +++ b/kernel/livepatch/transition.c @@ -0,0 +1,543 @@ +/* + * transition.c - Kernel Live Patching transition functions + * + * Copyright (C) 2015-2016 Josh Poimboeuf + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version 2 + * of the License, or (at your option) any later version. 
+ * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, see . + */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include +#include +#include "patch.h" +#include "transition.h" +#include "../sched/sched.h" + +#define MAX_STACK_ENTRIES 100 +#define STACK_ERR_BUF_SIZE 128 + +extern struct mutex klp_mutex; + +struct klp_patch *klp_transition_patch; + +static int klp_target_state = KLP_UNDEFINED; + +/* + * This work can be performed periodically to finish patching or unpatching any + * "straggler" tasks which failed to transition in the first attempt. + */ +static void klp_transition_work_fn(struct work_struct *work) +{ + mutex_lock(&klp_mutex); + + if (klp_transition_patch) + klp_try_complete_transition(); + + mutex_unlock(&klp_mutex); +} +static DECLARE_DELAYED_WORK(klp_transition_work, klp_transition_work_fn); + +/* + * The transition to the target patch state is complete. Clean up the data + * structures. + */ +static void klp_complete_transition(void) +{ + struct klp_object *obj; + struct klp_func *func; + struct task_struct *g, *task; + unsigned int cpu; + + if (klp_target_state == KLP_UNPATCHED) { + /* + * All tasks have transitioned to KLP_UNPATCHED so we can now + * remove the new functions from the func_stack. + */ + klp_unpatch_objects(klp_transition_patch); + + /* + * Make sure klp_ftrace_handler() can no longer see functions + * from this patch on the ops->func_stack. Otherwise, after + * func->transition gets cleared, the handler may choose a + * removed function. + */ + synchronize_rcu(); + } + + if (klp_transition_patch->immediate) + goto done; + + klp_for_each_object(klp_transition_patch, obj) + klp_for_each_func(obj, func) + func->transition = false; + + /* Prevent klp_ftrace_handler() from seeing KLP_UNDEFINED state */ + if (klp_target_state == KLP_PATCHED) + synchronize_rcu(); + + read_lock(&tasklist_lock); + for_each_process_thread(g, task) { + WARN_ON_ONCE(test_tsk_thread_flag(task, TIF_PATCH_PENDING)); + task->patch_state = KLP_UNDEFINED; + } + read_unlock(&tasklist_lock); + + for_each_possible_cpu(cpu) { + task = idle_task(cpu); + WARN_ON_ONCE(test_tsk_thread_flag(task, TIF_PATCH_PENDING)); + task->patch_state = KLP_UNDEFINED; + } + +done: + klp_target_state = KLP_UNDEFINED; + klp_transition_patch = NULL; +} + +/* + * This is called in the error path, to cancel a transition before it has + * started, i.e. klp_init_transition() has been called but + * klp_start_transition() hasn't. If the transition *has* been started, + * klp_reverse_transition() should be used instead. + */ +void klp_cancel_transition(void) +{ + klp_target_state = !klp_target_state; + klp_complete_transition(); +} + +/* + * Switch the patched state of the task to the set of functions in the target + * patch state. + * + * NOTE: If task is not 'current', the caller must ensure the task is inactive. + * Otherwise klp_ftrace_handler() might read the wrong 'patch_state' value. + */ +void klp_update_patch_state(struct task_struct *task) +{ + rcu_read_lock(); + + /* + * This test_and_clear_tsk_thread_flag() call also serves as a read + * barrier (smp_rmb) for two cases: + * + * 1) Enforce the order of the TIF_PATCH_PENDING read and the + * klp_target_state read. 
The corresponding write barrier is in + * klp_init_transition(). + * + * 2) Enforce the order of the TIF_PATCH_PENDING read and a future read + * of func->transition, if klp_ftrace_handler() is called later on + * the same CPU. See __klp_disable_patch(). + */ + if (test_and_clear_tsk_thread_flag(task, TIF_PATCH_PENDING)) + task->patch_state = READ_ONCE(klp_target_state); + + rcu_read_unlock(); +} + +/* + * Determine whether the given stack trace includes any references to a + * to-be-patched or to-be-unpatched function. + */ +static int klp_check_stack_func(struct klp_func *func, + struct stack_trace *trace) +{ + unsigned long func_addr, func_size, address; + struct klp_ops *ops; + int i; + + if (func->immediate) + return 0; + + for (i = 0; i < trace->nr_entries; i++) { + address = trace->entries[i]; + + if (klp_target_state == KLP_UNPATCHED) { + /* + * Check for the to-be-unpatched function + * (the func itself). + */ + func_addr = (unsigned long)func->new_func; + func_size = func->new_size; + } else { + /* + * Check for the to-be-patched function + * (the previous func). + */ + ops = klp_find_ops(func->old_addr); + + if (list_is_singular(&ops->func_stack)) { + /* original function */ + func_addr = func->old_addr; + func_size = func->old_size; + } else { + /* previously patched function */ + struct klp_func *prev; + + prev = list_next_entry(func, stack_node); + func_addr = (unsigned long)prev->new_func; + func_size = prev->new_size; + } + } + + if (address >= func_addr && address < func_addr + func_size) + return -EAGAIN; + } + + return 0; +} + +/* + * Determine whether it's safe to transition the task to the target patch state + * by looking for any to-be-patched or to-be-unpatched functions on its stack. + */ +static int klp_check_stack(struct task_struct *task, char *err_buf) +{ + static unsigned long entries[MAX_STACK_ENTRIES]; + struct stack_trace trace; + struct klp_object *obj; + struct klp_func *func; + int ret; + + trace.skip = 0; + trace.nr_entries = 0; + trace.max_entries = MAX_STACK_ENTRIES; + trace.entries = entries; + ret = save_stack_trace_tsk_reliable(task, &trace); + WARN_ON_ONCE(ret == -ENOSYS); + if (ret) { + snprintf(err_buf, STACK_ERR_BUF_SIZE, + "%s: %s:%d has an unreliable stack\n", + __func__, task->comm, task->pid); + return ret; + } + + klp_for_each_object(klp_transition_patch, obj) { + if (!obj->patched) + continue; + klp_for_each_func(obj, func) { + ret = klp_check_stack_func(func, &trace); + if (ret) { + snprintf(err_buf, STACK_ERR_BUF_SIZE, + "%s: %s:%d is sleeping on function %s\n", + __func__, task->comm, task->pid, + func->old_name); + return ret; + } + } + } + + return 0; +} + +/* + * Try to safely switch a task to the target patch state. If it's currently + * running, or it's sleeping on a to-be-patched or to-be-unpatched function, or + * if the stack is unreliable, return false. + */ +static bool klp_try_switch_task(struct task_struct *task) +{ + struct rq *rq; + struct rq_flags flags; + int ret; + bool success = false; + char err_buf[STACK_ERR_BUF_SIZE]; + + err_buf[0] = '\0'; + + /* check if this task has already switched over */ + if (task->patch_state == klp_target_state) + return true; + + /* + * For arches which don't have reliable stack traces, we have to rely + * on other methods (e.g., switching tasks at kernel exit). + */ + if (!klp_have_reliable_stack()) + return false; + + /* + * Now try to check the stack for any to-be-patched or to-be-unpatched + * functions. If all goes well, switch the task to the target patch + * state. 
+ */ + rq = task_rq_lock(task, &flags); + + if (task_running(rq, task) && task != current) { + snprintf(err_buf, STACK_ERR_BUF_SIZE, + "%s: %s:%d is running\n", __func__, task->comm, + task->pid); + goto done; + } + + ret = klp_check_stack(task, err_buf); + if (ret) + goto done; + + success = true; + + clear_tsk_thread_flag(task, TIF_PATCH_PENDING); + task->patch_state = klp_target_state; + +done: + task_rq_unlock(rq, task, &flags); + + /* + * Due to console deadlock issues, pr_debug() can't be used while + * holding the task rq lock. Instead we have to use a temporary buffer + * and print the debug message after releasing the lock. + */ + if (err_buf[0] != '\0') + pr_debug("%s", err_buf); + + return success; + +} + +/* + * Try to switch all remaining tasks to the target patch state by walking the + * stacks of sleeping tasks and looking for any to-be-patched or + * to-be-unpatched functions. If such functions are found, the task can't be + * switched yet. + * + * If any tasks are still stuck in the initial patch state, schedule a retry. + */ +void klp_try_complete_transition(void) +{ + unsigned int cpu; + struct task_struct *g, *task; + bool complete = true; + + WARN_ON_ONCE(klp_target_state == KLP_UNDEFINED); + + /* + * If the patch can be applied or reverted immediately, skip the + * per-task transitions. + */ + if (klp_transition_patch->immediate) + goto success; + + /* + * Try to switch the tasks to the target patch state by walking their + * stacks and looking for any to-be-patched or to-be-unpatched + * functions. If such functions are found on a stack, or if the stack + * is deemed unreliable, the task can't be switched yet. + * + * Usually this will transition most (or all) of the tasks on a system + * unless the patch includes changes to a very common function. + */ + read_lock(&tasklist_lock); + for_each_process_thread(g, task) + if (!klp_try_switch_task(task)) + complete = false; + read_unlock(&tasklist_lock); + + /* + * Ditto for the idle "swapper" tasks. + */ + get_online_cpus(); + for_each_possible_cpu(cpu) { + task = idle_task(cpu); + if (cpu_online(cpu)) { + if (!klp_try_switch_task(task)) + complete = false; + } else if (task->patch_state != klp_target_state) { + /* offline idle tasks can be switched immediately */ + clear_tsk_thread_flag(task, TIF_PATCH_PENDING); + task->patch_state = klp_target_state; + } + } + put_online_cpus(); + + if (!complete) { + /* + * Some tasks weren't able to be switched over. Try again + * later and/or wait for other methods like kernel exit + * switching. + */ + schedule_delayed_work(&klp_transition_work, + round_jiffies_relative(HZ)); + return; + } + +success: + pr_notice("'%s': %s complete\n", klp_transition_patch->mod->name, + klp_target_state == KLP_PATCHED ? "patching" : "unpatching"); + + /* we're done, now cleanup the data structures */ + klp_complete_transition(); +} + +/* + * Start the transition to the specified target patch state so tasks can begin + * switching to it. + */ +void klp_start_transition(void) +{ + struct task_struct *g, *task; + unsigned int cpu; + + WARN_ON_ONCE(klp_target_state == KLP_UNDEFINED); + + pr_notice("'%s': %s...\n", klp_transition_patch->mod->name, + klp_target_state == KLP_PATCHED ? "patching" : "unpatching"); + + /* + * If the patch can be applied or reverted immediately, skip the + * per-task transitions. + */ + if (klp_transition_patch->immediate) + return; + + /* + * Mark all normal tasks as needing a patch state update. 
They'll + * switch either in klp_try_complete_transition() or as they exit the + * kernel. + */ + read_lock(&tasklist_lock); + for_each_process_thread(g, task) + if (task->patch_state != klp_target_state) + set_tsk_thread_flag(task, TIF_PATCH_PENDING); + read_unlock(&tasklist_lock); + + /* + * Mark all idle tasks as needing a patch state update. They'll switch + * either in klp_try_complete_transition() or at the idle loop switch + * point. + */ + for_each_possible_cpu(cpu) { + task = idle_task(cpu); + if (task->patch_state != klp_target_state) + set_tsk_thread_flag(task, TIF_PATCH_PENDING); + } +} + +/* + * Initialize the global target patch state and all tasks to the initial patch + * state, and initialize all function transition states to true in preparation + * for patching or unpatching. + */ +void klp_init_transition(struct klp_patch *patch, int state) +{ + struct task_struct *g, *task; + unsigned int cpu; + struct klp_object *obj; + struct klp_func *func; + int initial_state = !state; + + WARN_ON_ONCE(klp_target_state != KLP_UNDEFINED); + + klp_transition_patch = patch; + + /* + * Set the global target patch state which tasks will switch to. This + * has no effect until the TIF_PATCH_PENDING flags get set later. + */ + klp_target_state = state; + + /* + * If the patch can be applied or reverted immediately, skip the + * per-task transitions. + */ + if (patch->immediate) + return; + + /* + * Initialize all tasks to the initial patch state to prepare them for + * switching to the target state. + */ + read_lock(&tasklist_lock); + for_each_process_thread(g, task) { + WARN_ON_ONCE(task->patch_state != KLP_UNDEFINED); + task->patch_state = initial_state; + } + read_unlock(&tasklist_lock); + + /* + * Ditto for the idle "swapper" tasks. + */ + for_each_possible_cpu(cpu) { + task = idle_task(cpu); + WARN_ON_ONCE(task->patch_state != KLP_UNDEFINED); + task->patch_state = initial_state; + } + + /* + * Enforce the order of the task->patch_state initializations and the + * func->transition updates to ensure that klp_ftrace_handler() doesn't + * see a func in transition with a task->patch_state of KLP_UNDEFINED. + * + * Also enforce the order of the klp_target_state write and future + * TIF_PATCH_PENDING writes to ensure klp_update_patch_state() doesn't + * set a task->patch_state to KLP_UNDEFINED. + */ + smp_wmb(); + + /* + * Set the func transition states so klp_ftrace_handler() will know to + * switch to the transition logic. + * + * When patching, the funcs aren't yet in the func_stack and will be + * made visible to the ftrace handler shortly by the calls to + * klp_patch_object(). + * + * When unpatching, the funcs are already in the func_stack and so are + * already visible to the ftrace handler. + */ + klp_for_each_object(patch, obj) + klp_for_each_func(obj, func) + func->transition = true; +} + +/* + * This function can be called in the middle of an existing transition to + * reverse the direction of the target patch state. This can be done to + * effectively cancel an existing enable or disable operation if there are any + * tasks which are stuck in the initial patch state. + */ +void klp_reverse_transition(void) +{ + unsigned int cpu; + struct task_struct *g, *task; + + klp_transition_patch->enabled = !klp_transition_patch->enabled; + + klp_target_state = !klp_target_state; + + /* + * Clear all TIF_PATCH_PENDING flags to prevent races caused by + * klp_update_patch_state() running in parallel with + * klp_start_transition(). 
+ */ + read_lock(&tasklist_lock); + for_each_process_thread(g, task) + clear_tsk_thread_flag(task, TIF_PATCH_PENDING); + read_unlock(&tasklist_lock); + + for_each_possible_cpu(cpu) + clear_tsk_thread_flag(idle_task(cpu), TIF_PATCH_PENDING); + + /* Let any remaining calls to klp_update_patch_state() complete */ + synchronize_rcu(); + + klp_start_transition(); +} + +/* Called from copy_process() during fork */ +void klp_copy_process(struct task_struct *child) +{ + child->patch_state = current->patch_state; + + /* TIF_PATCH_PENDING gets copied in setup_thread_stack() */ +}

diff --git a/kernel/livepatch/transition.h b/kernel/livepatch/transition.h new file mode 100644 index 000000000000..ce09b326546c --- /dev/null +++ b/kernel/livepatch/transition.h @@ -0,0 +1,14 @@ +#ifndef _LIVEPATCH_TRANSITION_H +#define _LIVEPATCH_TRANSITION_H + +#include <linux/livepatch.h> + +extern struct klp_patch *klp_transition_patch; + +void klp_init_transition(struct klp_patch *patch, int state); +void klp_cancel_transition(void); +void klp_start_transition(void); +void klp_try_complete_transition(void); +void klp_reverse_transition(void); + +#endif /* _LIVEPATCH_TRANSITION_H */

diff --git a/kernel/sched/idle.c b/kernel/sched/idle.c index ac6d5176463d..2a25a9ec2c6e 100644 --- a/kernel/sched/idle.c +++ b/kernel/sched/idle.c @@ -10,6 +10,7 @@ #include #include #include +#include <linux/livepatch.h> #include @@ -265,6 +266,9 @@ static void do_idle(void) sched_ttwu_pending(); schedule_preempt_disabled(); + + if (unlikely(klp_patch_pending(current))) + klp_update_patch_state(current); } bool cpu_in_idle(unsigned long pc)

diff --git a/samples/livepatch/livepatch-sample.c b/samples/livepatch/livepatch-sample.c index e34f871e69b1..629e0dca0887 100644 --- a/samples/livepatch/livepatch-sample.c +++ b/samples/livepatch/livepatch-sample.c @@ -17,6 +17,8 @@ * along with this program; if not, see <http://www.gnu.org/licenses/>. */ +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + #include <linux/module.h> #include <linux/kernel.h> #include <linux/livepatch.h> @@ -69,6 +71,21 @@ static int livepatch_init(void) { int ret; + if (!klp_have_reliable_stack() && !patch.immediate) { + /* + * WARNING: Be very careful when using 'patch.immediate' in + * your patches. It's ok to use it for simple patches like + * this, but for more complex patches which change function + * semantics, locking semantics, or data structures, it may not + * be safe. Use of this option will also prevent removal of + * the patch. + * + * See Documentation/livepatch/livepatch.txt for more details. + */ + patch.immediate = true; + pr_notice("The consistency model isn't supported for your architecture. Bypassing safety mechanisms and applying the patch immediately.\n"); + } + ret = klp_register_patch(&patch); if (ret) return ret;

From 7c23b330011690705613a66a8239d2ca64a41d4d Mon Sep 17 00:00:00 2001 From: Josh Poimboeuf Date: Mon, 13 Feb 2017 19:42:41 -0600 Subject: [PATCH 14/19] livepatch: add /proc/<pid>/patch_state Expose the per-task patch state value so users can determine which tasks are holding up completion of a patching operation.
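To illustrate how this file might be consumed, here is a hedged user-space sketch (not part of the patch): it walks /proc and lists tasks whose patch_state is still 0, i.e. tasks blocking an enable transition; such stragglers can then be prodded with SIGSTOP and SIGCONT as the consistency model documentation suggests.

#include <ctype.h>
#include <dirent.h>
#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	DIR *proc = opendir("/proc");
	struct dirent *de;

	if (!proc)
		return 1;

	while ((de = readdir(proc)) != NULL) {
		char path[64], buf[16];
		FILE *f;

		if (!isdigit((unsigned char)de->d_name[0]))
			continue;	/* not a <pid> directory */

		snprintf(path, sizeof(path), "/proc/%s/patch_state", de->d_name);
		f = fopen(path, "r");
		if (!f)
			continue;	/* task exited or CONFIG_LIVEPATCH=n */

		/* 0 == still unpatched while an enable transition is pending */
		if (fgets(buf, sizeof(buf), f) && atoi(buf) == 0)
			printf("pid %s is blocking the transition\n", de->d_name);

		fclose(f);
	}

	closedir(proc);
	return 0;
}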
Signed-off-by: Josh Poimboeuf Reviewed-by: Petr Mladek Reviewed-by: Miroslav Benes Signed-off-by: Jiri Kosina --- Documentation/filesystems/proc.txt | 18 ++++++++++++++++++ fs/proc/base.c | 15 +++++++++++++++ 2 files changed, 33 insertions(+)

diff --git a/Documentation/filesystems/proc.txt b/Documentation/filesystems/proc.txt index c94b4675d021..9036dbf16156 100644 --- a/Documentation/filesystems/proc.txt +++ b/Documentation/filesystems/proc.txt @@ -44,6 +44,7 @@ Table of Contents 3.8 /proc/<pid>/fdinfo/<fd> - Information about opened file 3.9 /proc/<pid>/map_files - Information about memory mapped files 3.10 /proc/<pid>/timerslack_ns - Task timerslack value + 3.11 /proc/<pid>/patch_state - Livepatch patch operation state 4 Configuring procfs 4.1 Mount options @@ -1887,6 +1888,23 @@ Valid values are from 0 - ULLONG_MAX An application setting the value must have PTRACE_MODE_ATTACH_FSCREDS level permissions on the task specified to change its timerslack_ns value. +3.11 /proc/<pid>/patch_state - Livepatch patch operation state +----------------------------------------------------------------- +When CONFIG_LIVEPATCH is enabled, this file displays the value of the +patch state for the task. + +A value of '-1' indicates that no patch is in transition. + +A value of '0' indicates that a patch is in transition and the task is +unpatched. If the patch is being enabled, then the task hasn't been +patched yet. If the patch is being disabled, then the task has already +been unpatched. + +A value of '1' indicates that a patch is in transition and the task is +patched. If the patch is being enabled, then the task has already been +patched. If the patch is being disabled, then the task hasn't been +unpatched yet. + ------------------------------------------------------------------------------ Configuring procfs

diff --git a/fs/proc/base.c b/fs/proc/base.c index c87b6b9a8a76..9e3ac5c11780 100644 --- a/fs/proc/base.c +++ b/fs/proc/base.c @@ -2834,6 +2834,15 @@ static int proc_pid_personality(struct seq_file *m, struct pid_namespace *ns, return err; } +#ifdef CONFIG_LIVEPATCH +static int proc_pid_patch_state(struct seq_file *m, struct pid_namespace *ns, + struct pid *pid, struct task_struct *task) +{ + seq_printf(m, "%d\n", task->patch_state); + return 0; +} +#endif /* CONFIG_LIVEPATCH */ + /* * Thread groups */ @@ -2933,6 +2942,9 @@ static const struct pid_entry tgid_base_stuff[] = { REG("timers", S_IRUGO, proc_timers_operations), #endif REG("timerslack_ns", S_IRUGO|S_IWUGO, proc_pid_set_timerslack_ns_operations), +#ifdef CONFIG_LIVEPATCH + ONE("patch_state", S_IRUSR, proc_pid_patch_state), +#endif }; static int proc_tgid_base_readdir(struct file *file, struct dir_context *ctx) @@ -3315,6 +3327,9 @@ static const struct pid_entry tid_base_stuff[] = { REG("projid_map", S_IRUGO|S_IWUSR, proc_projid_map_operations), REG("setgroups", S_IRUGO|S_IWUSR, proc_setgroups_operations), #endif +#ifdef CONFIG_LIVEPATCH + ONE("patch_state", S_IRUSR, proc_pid_patch_state), +#endif }; static int proc_tid_base_readdir(struct file *file, struct dir_context *ctx)

From 3ec24776bfd09668079df7dca0c0136d80820ab4 Mon Sep 17 00:00:00 2001 From: Josh Poimboeuf Date: Mon, 6 Mar 2017 11:20:29 -0600 Subject: [PATCH 15/19] livepatch: allow removal of a disabled patch Currently we do not allow a patch module to unload since there is no method to determine if a task is still running in the patched code. The consistency model gives us a way because when the unpatching finishes we know that all tasks were marked as safe to call an original function.
Thus every new call to the function calls the original code and at the same time no task can be somewhere in the patched code, because it had to leave that code to be marked as safe. We can safely let the patch module go after that. Completion is used for synchronization between module removal and sysfs infrastructure in a similar way to commit 942e443127e9 ("module: Fix mod->mkobj.kobj potentially freed too early"). Note that we still do not allow removal for the immediate model, that is, when no consistency model is used. The module refcount may increase in this case if somebody disables and enables the patch several times. This should not cause any harm. With this change a call to try_module_get() is moved to __klp_enable_patch from klp_register_patch to make module reference counting symmetric (module_put() is in a patch disable path) and to allow taking a new reference to a disabled module when it is being enabled. Finally, we need to be very careful about possible races between klp_unregister_patch(), kobject_put() functions and operations on the related sysfs files. kobject_put(&patch->kobj) must be called without klp_mutex. Otherwise, it might be blocked by enabled_store() that needs the mutex as well. In addition, enabled_store() must check if the patch was not unregistered in the meantime. There is no need to do the same for other kobject_put() callsites at the moment. Their sysfs operations neither take the lock nor access any data that might be freed in the meantime. There was an attempt to use kobjects the right way and prevent these races by design. But it made the patch definition more complicated and opened another can of worms. See https://lkml.kernel.org/r/1464018848-4303-1-git-send-email-pmladek@suse.com [Thanks to Petr Mladek for improving the commit message.]

Signed-off-by: Miroslav Benes Signed-off-by: Josh Poimboeuf Reviewed-by: Petr Mladek Acked-by: Miroslav Benes Signed-off-by: Jiri Kosina --- Documentation/livepatch/livepatch.txt | 28 +++------- include/linux/livepatch.h | 3 + kernel/livepatch/core.c | 80 +++++++++++++++++---------- kernel/livepatch/transition.c | 37 ++++++++++++- samples/livepatch/livepatch-sample.c | 1 - 5 files changed, 96 insertions(+), 53 deletions(-)

diff --git a/Documentation/livepatch/livepatch.txt b/Documentation/livepatch/livepatch.txt index 4f2aec8d4c12..ecdb18104ab0 100644 --- a/Documentation/livepatch/livepatch.txt +++ b/Documentation/livepatch/livepatch.txt @@ -316,8 +316,15 @@ section "Livepatch life-cycle" below for more details about these two operations. Module removal is only safe when there are no users of the underlying -functions. The immediate consistency model is not able to detect this; -therefore livepatch modules cannot be removed. See "Limitations" below. +functions. The immediate consistency model is not able to detect this. The +code just redirects the functions at the very beginning and it does not +check if the functions are in use. In other words, it knows when the +functions get called but it does not know when the functions return. +Therefore it cannot be decided when the livepatch module can be safely +removed. This is solved by a hybrid consistency model. When the system is +transitioned to a new patch state (patched/unpatched) it is guaranteed that +no task sleeps or runs in the old code. + 5. Livepatch life-cycle ======================= @@ -469,23 +476,6 @@ The current Livepatch implementation has several limitations: by "notrace". - + Livepatch modules can not be removed.
- - The current implementation just redirects the functions at the very - beginning. It does not check if the functions are in use. In other - words, it knows when the functions get called but it does not - know when the functions return. Therefore it can not decide when - the livepatch module can be safely removed. - - This will get most likely solved once a more complex consistency model - is supported. The idea is that a safe state for patching should also - mean a safe state for removing the patch. - - Note that the patch itself might get disabled by writing zero - to /sys/kernel/livepatch/<patch>/enabled. It causes that the new - code will not longer get called. But it does not guarantee - that anyone is not sleeping anywhere in the new code. - + Livepatch works reliably only when the dynamic ftrace is located at the very beginning of the function.

diff --git a/include/linux/livepatch.h b/include/linux/livepatch.h index ed90ad1605c1..194991ef9347 100644 --- a/include/linux/livepatch.h +++ b/include/linux/livepatch.h @@ -23,6 +23,7 @@ #include <linux/module.h> #include <linux/ftrace.h> +#include <linux/completion.h> #if IS_ENABLED(CONFIG_LIVEPATCH) @@ -114,6 +115,7 @@ struct klp_object { * @list: list node for global list of registered patches * @kobj: kobject for sysfs resources * @enabled: the patch is enabled (but operation may be incomplete) + * @finish: for waiting till it is safe to remove the patch module */ struct klp_patch { /* external */ @@ -125,6 +127,7 @@ struct klp_patch { struct list_head list; struct kobject kobj; bool enabled; + struct completion finish; }; #define klp_for_each_object(patch, obj) \

diff --git a/kernel/livepatch/core.c b/kernel/livepatch/core.c index 3dc3c9049690..6844c1213df8 100644 --- a/kernel/livepatch/core.c +++ b/kernel/livepatch/core.c @@ -29,6 +29,7 @@ #include #include #include +#include <linux/completion.h> #include #include "patch.h" #include "transition.h" @@ -354,6 +355,18 @@ static int __klp_enable_patch(struct klp_patch *patch) !list_prev_entry(patch, list)->enabled) return -EBUSY; + /* + * A reference is taken on the patch module to prevent it from being + * unloaded. + * + * Note: For immediate (no consistency model) patches we don't allow + * patch modules to unload since there is no safe/sane method to + * determine if a thread is still running in the patched code contained + * in the patch module once the ftrace registration is successful. + */ + if (!try_module_get(patch->mod)) + return -ENODEV; + pr_notice("enabling patch '%s'\n", patch->mod->name); klp_init_transition(patch, KLP_PATCHED); @@ -442,6 +455,15 @@ static ssize_t enabled_store(struct kobject *kobj, struct kobj_attribute *attr, mutex_lock(&klp_mutex); + if (!klp_is_patch_registered(patch)) { + /* + * Module with the patch could either disappear meanwhile or is + * not properly initialized yet. + */ + ret = -EINVAL; + goto err; + } + if (patch->enabled == enabled) { /* already in requested state */ ret = -EINVAL; @@ -498,10 +520,10 @@ static struct attribute *klp_patch_attrs[] = { static void klp_kobj_release_patch(struct kobject *kobj) { - /* - * Once we have a consistency model we'll need to module_put() the - * patch module here. See klp_register_patch() for more details.
- */ + struct klp_patch *patch; + + patch = container_of(kobj, struct klp_patch, kobj); + complete(&patch->finish); } static struct kobj_type klp_ktype_patch = { @@ -572,7 +594,6 @@ static void klp_free_patch(struct klp_patch *patch) klp_free_objects_limited(patch, NULL); if (!list_empty(&patch->list)) list_del(&patch->list); - kobject_put(&patch->kobj); } static int klp_init_func(struct klp_object *obj, struct klp_func *func) @@ -695,11 +716,14 @@ static int klp_init_patch(struct klp_patch *patch) mutex_lock(&klp_mutex); patch->enabled = false; + init_completion(&patch->finish); ret = kobject_init_and_add(&patch->kobj, &klp_ktype_patch, klp_root_kobj, "%s", patch->mod->name); - if (ret) - goto unlock; + if (ret) { + mutex_unlock(&klp_mutex); + return ret; + } klp_for_each_object(patch, obj) { ret = klp_init_object(patch, obj); @@ -715,9 +739,12 @@ static int klp_init_patch(struct klp_patch *patch) free: klp_free_objects_limited(patch, obj); - kobject_put(&patch->kobj); -unlock: + mutex_unlock(&klp_mutex); + + kobject_put(&patch->kobj); + wait_for_completion(&patch->finish); + return ret; } @@ -731,23 +758,29 @@ unlock: */ int klp_unregister_patch(struct klp_patch *patch) { - int ret = 0; + int ret; mutex_lock(&klp_mutex); if (!klp_is_patch_registered(patch)) { ret = -EINVAL; - goto out; + goto err; } if (patch->enabled) { ret = -EBUSY; - goto out; + goto err; } klp_free_patch(patch); -out: + mutex_unlock(&klp_mutex); + + kobject_put(&patch->kobj); + wait_for_completion(&patch->finish); + + return 0; +err: mutex_unlock(&klp_mutex); return ret; } @@ -760,12 +793,13 @@ EXPORT_SYMBOL_GPL(klp_unregister_patch); * Initializes the data structure associated with the patch and * creates the sysfs interface. * + * There is no need to take the reference on the patch module here. It is done + * later when the patch is enabled. + * * Return: 0 on success, otherwise error */ int klp_register_patch(struct klp_patch *patch) { - int ret; - if (!patch || !patch->mod) return -EINVAL; @@ -788,21 +822,7 @@ int klp_register_patch(struct klp_patch *patch) return -ENOSYS; } - /* - * A reference is taken on the patch module to prevent it from being - * unloaded. Right now, we don't allow patch modules to unload since - * there is currently no method to determine if a thread is still - * running in the patched code contained in the patch module once - * the ftrace registration is successful. 
- */ - if (!try_module_get(patch->mod)) - return -ENODEV; - - ret = klp_init_patch(patch); - if (ret) - module_put(patch->mod); - - return ret; + return klp_init_patch(patch); } EXPORT_SYMBOL_GPL(klp_register_patch); diff --git a/kernel/livepatch/transition.c b/kernel/livepatch/transition.c index 428533ec51b5..0ab7abd53b0b 100644 --- a/kernel/livepatch/transition.c +++ b/kernel/livepatch/transition.c @@ -59,6 +59,7 @@ static void klp_complete_transition(void) struct klp_func *func; struct task_struct *g, *task; unsigned int cpu; + bool immediate_func = false; if (klp_target_state == KLP_UNPATCHED) { /* @@ -79,9 +80,16 @@ static void klp_complete_transition(void) if (klp_transition_patch->immediate) goto done; - klp_for_each_object(klp_transition_patch, obj) - klp_for_each_func(obj, func) + klp_for_each_object(klp_transition_patch, obj) { + klp_for_each_func(obj, func) { func->transition = false; + if (func->immediate) + immediate_func = true; + } + } + + if (klp_target_state == KLP_UNPATCHED && !immediate_func) + module_put(klp_transition_patch->mod); /* Prevent klp_ftrace_handler() from seeing KLP_UNDEFINED state */ if (klp_target_state == KLP_PATCHED) @@ -113,8 +121,31 @@ done: */ void klp_cancel_transition(void) { - klp_target_state = !klp_target_state; + struct klp_patch *patch = klp_transition_patch; + struct klp_object *obj; + struct klp_func *func; + bool immediate_func = false; + + if (WARN_ON_ONCE(klp_target_state != KLP_PATCHED)) + return; + + klp_target_state = KLP_UNPATCHED; klp_complete_transition(); + + /* + * In the enable error path, even immediate patches can be safely + * removed because the transition hasn't been started yet. + * + * klp_complete_transition() doesn't have a module_put() for immediate + * patches, so do it here. + */ + klp_for_each_object(patch, obj) + klp_for_each_func(obj, func) + if (func->immediate) + immediate_func = true; + + if (patch->immediate || immediate_func) + module_put(patch->mod); } /* diff --git a/samples/livepatch/livepatch-sample.c b/samples/livepatch/livepatch-sample.c index 629e0dca0887..84795223f15f 100644 --- a/samples/livepatch/livepatch-sample.c +++ b/samples/livepatch/livepatch-sample.c @@ -99,7 +99,6 @@ static int livepatch_init(void) static void livepatch_exit(void) { - WARN_ON(klp_disable_patch(&patch)); WARN_ON(klp_unregister_patch(&patch)); } From 10517429b5ac242498d7d847f79f10c21d7eedb0 Mon Sep 17 00:00:00 2001 From: Jiri Kosina Date: Wed, 8 Mar 2017 14:27:05 +0100 Subject: [PATCH 16/19] livepatch: make klp_mutex proper part of API klp_mutex is shared between core.c and transition.c, and as such would rather be properly located in a header so that we don't have to play 'extern' games from .c sources. This also silences sparse warning (wrongly) suggesting that klp_mutex should be defined static. 
Acked-by: Miroslav Benes
Acked-by: Josh Poimboeuf
Signed-off-by: Jiri Kosina
---
 kernel/livepatch/core.c       | 1 +
 kernel/livepatch/core.h       | 6 ++++++
 kernel/livepatch/transition.c | 3 +--
 3 files changed, 8 insertions(+), 2 deletions(-)
 create mode 100644 kernel/livepatch/core.h

diff --git a/kernel/livepatch/core.c b/kernel/livepatch/core.c
index 6844c1213df8..47402b8b3990 100644
--- a/kernel/livepatch/core.c
+++ b/kernel/livepatch/core.c
@@ -31,6 +31,7 @@
 #include
 #include
 #include
+#include "core.h"
 #include "patch.h"
 #include "transition.h"
 
diff --git a/kernel/livepatch/core.h b/kernel/livepatch/core.h
new file mode 100644
index 000000000000..c74f24c47837
--- /dev/null
+++ b/kernel/livepatch/core.h
@@ -0,0 +1,6 @@
+#ifndef _LIVEPATCH_CORE_H
+#define _LIVEPATCH_CORE_H
+
+extern struct mutex klp_mutex;
+
+#endif /* _LIVEPATCH_CORE_H */
diff --git a/kernel/livepatch/transition.c b/kernel/livepatch/transition.c
index 0ab7abd53b0b..2de09e0c4e5c 100644
--- a/kernel/livepatch/transition.c
+++ b/kernel/livepatch/transition.c
@@ -21,6 +21,7 @@
 #include
 #include
+#include "core.h"
 #include "patch.h"
 #include "transition.h"
 #include "../sched/sched.h"
@@ -28,8 +29,6 @@
 #define MAX_STACK_ENTRIES 100
 #define STACK_ERR_BUF_SIZE 128
 
-extern struct mutex klp_mutex;
-
 struct klp_patch *klp_transition_patch;
 
 static int klp_target_state = KLP_UNDEFINED;

From 72f04b50d61c81a2cc3065d3b973e83827760515 Mon Sep 17 00:00:00 2001
From: Zhou Chengming
Date: Tue, 28 Mar 2017 21:10:35 +0800
Subject: [PATCH 17/19] livepatch: Reduce the time of finding module symbols

It was reported that loading a klp.ko built for one of our out-of-tree
modules with insmod takes too long.

~ time sudo insmod klp.ko
real	0m23.799s
user	0m0.036s
sys	0m21.256s

The reason: the out-of-tree module uses a lot of static local
variables, so klp.ko contains a lot of relocation records that
reference the module. For each such entry, klp_find_object_symbol() is
called to resolve it, but that function uses kallsyms_on_each_symbol()
even for module symbols, wasting a lot of time walking through the
vmlinux kallsyms table over and over.

This patch changes it to use module_kallsyms_on_each_symbol() for
module symbols. With the patch applied, the sys time drops
dramatically.

~ time sudo insmod klp.ko
real	0m1.007s
user	0m0.032s
sys	0m0.924s

Signed-off-by: Zhou Chengming
Acked-by: Josh Poimboeuf
Acked-by: Jessica Yu
Acked-by: Miroslav Benes
Signed-off-by: Jiri Kosina
---
 kernel/livepatch/core.c | 5 ++++-
 1 file changed, 4 insertions(+), 1 deletion(-)

diff --git a/kernel/livepatch/core.c b/kernel/livepatch/core.c
index af4643873e71..b4b8bb012a93 100644
--- a/kernel/livepatch/core.c
+++ b/kernel/livepatch/core.c
@@ -182,7 +182,10 @@ static int klp_find_object_symbol(const char *objname, const char *name,
 	};
 
 	mutex_lock(&module_mutex);
-	kallsyms_on_each_symbol(klp_find_callback, &args);
+	if (objname)
+		module_kallsyms_on_each_symbol(klp_find_callback, &args);
+	else
+		kallsyms_on_each_symbol(klp_find_callback, &args);
 	mutex_unlock(&module_mutex);
 
 	/*

From e679af627fe875a51d40b9a2b17f08fbde36e0e2 Mon Sep 17 00:00:00 2001
From: Petr Mladek
Date: Tue, 11 Apr 2017 13:07:48 +0200
Subject: [PATCH 18/19] livepatch: Cancel transition a safe way for immediate
 patches

klp_init_transition() does not set func->transition for immediate
patches. Then klp_ftrace_handler() could use the new code immediately.
As a result, it is not safe to module_put() the livepatch module in
klp_cancel_transition().
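To see why, consider a simplified, paraphrased sketch of the ftrace handler's dispatch logic (illustrative only; the real klp_ftrace_handler() in kernel/livepatch/patch.c also handles RCU, ops lookup, and error cases, and the helper name here is invented):

/* Paraphrased sketch of klp_ftrace_handler(), simplified for illustration. */
static void klp_handler_sketch(struct klp_ops *ops, struct pt_regs *regs)
{
	struct klp_func *func;

	func = list_first_entry(&ops->func_stack, struct klp_func,
				stack_node);

	if (func->transition) {
		/* mid-transition: each task chooses old vs. new code */
		if (current->patch_state == KLP_UNPATCHED)
			return;	/* keep executing the old function */
	}

	/*
	 * For an immediate patch, func->transition is never set, so
	 * execution is redirected to the new code as soon as ftrace
	 * registration succeeds -- a task may already be running inside
	 * the patch module by the time klp_cancel_transition() would
	 * drop the module reference.
	 */
	klp_arch_set_pc(regs, (unsigned long)func->new_func);
}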
This patch reverts most of the last-minute changes to
klp_cancel_transition(). It keeps the warning about misuse because it
still makes sense.

Fixes: 3ec24776bfd0 ("livepatch: allow removal of a disabled patch")
Signed-off-by: Petr Mladek
Acked-by: Miroslav Benes
Acked-by: Josh Poimboeuf
Signed-off-by: Jiri Kosina
---
 kernel/livepatch/transition.c | 20 --------------------
 1 file changed, 20 deletions(-)

diff --git a/kernel/livepatch/transition.c b/kernel/livepatch/transition.c
index 2de09e0c4e5c..adc0cc64aa4b 100644
--- a/kernel/livepatch/transition.c
+++ b/kernel/livepatch/transition.c
@@ -120,31 +120,11 @@ done:
  */
 void klp_cancel_transition(void)
 {
-	struct klp_patch *patch = klp_transition_patch;
-	struct klp_object *obj;
-	struct klp_func *func;
-	bool immediate_func = false;
-
 	if (WARN_ON_ONCE(klp_target_state != KLP_PATCHED))
 		return;
 
 	klp_target_state = KLP_UNPATCHED;
 	klp_complete_transition();
-
-	/*
-	 * In the enable error path, even immediate patches can be safely
-	 * removed because the transition hasn't been started yet.
-	 *
-	 * klp_complete_transition() doesn't have a module_put() for immediate
-	 * patches, so do it here.
-	 */
-	klp_for_each_object(patch, obj)
-		klp_for_each_func(obj, func)
-			if (func->immediate)
-				immediate_func = true;
-
-	if (patch->immediate || immediate_func)
-		module_put(patch->mod);
 }
 
 /*

From 77f8f39a2e463eca89a19b916189d0e4e38f75d8 Mon Sep 17 00:00:00 2001
From: Josh Poimboeuf
Date: Thu, 13 Apr 2017 17:59:15 -0500
Subject: [PATCH 19/19] livepatch: add missing printk newlines

Add missing newlines to some pr_err() strings.

Signed-off-by: Josh Poimboeuf
Acked-by: Miroslav Benes
Acked-by: Jessica Yu
Signed-off-by: Jiri Kosina
---
 kernel/livepatch/core.c | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)

diff --git a/kernel/livepatch/core.c b/kernel/livepatch/core.c
index b4b8bb012a93..8739e9e0bdb8 100644
--- a/kernel/livepatch/core.c
+++ b/kernel/livepatch/core.c
@@ -236,7 +236,7 @@ static int klp_resolve_symbols(Elf_Shdr *relasec, struct module *pmod)
 	for (i = 0; i < relasec->sh_size / sizeof(Elf_Rela); i++) {
 		sym = pmod->core_kallsyms.symtab + ELF_R_SYM(relas[i].r_info);
 		if (sym->st_shndx != SHN_LIVEPATCH) {
-			pr_err("symbol %s is not marked as a livepatch symbol",
+			pr_err("symbol %s is not marked as a livepatch symbol\n",
 			       strtab + sym->st_name);
 			return -EINVAL;
 		}
@@ -246,7 +246,7 @@ static int klp_resolve_symbols(Elf_Shdr *relasec, struct module *pmod)
 			   ".klp.sym.%55[^.].%127[^,],%lu",
 			   objname, symname, &sympos);
 		if (cnt != 3) {
-			pr_err("symbol %s has an incorrectly formatted name",
+			pr_err("symbol %s has an incorrectly formatted name\n",
 			       strtab + sym->st_name);
 			return -EINVAL;
 		}
@@ -291,7 +291,7 @@ static int klp_write_object_relocations(struct module *pmod,
 	 */
 	cnt = sscanf(secname, ".klp.rela.%55[^.]", sec_objname);
 	if (cnt != 1) {
-		pr_err("section %s has an incorrectly formatted name",
+		pr_err("section %s has an incorrectly formatted name\n",
 		       secname);
 		ret = -EINVAL;
 		break;
@@ -921,7 +921,7 @@ int klp_register_patch(struct klp_patch *patch)
 		return -EINVAL;
 
 	if (!is_livepatch_module(patch->mod)) {
-		pr_err("module %s is not marked as a livepatch module",
+		pr_err("module %s is not marked as a livepatch module\n",
 		       patch->mod->name);
 		return -EINVAL;
 	}
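For context on why the newlines matter (a simplified description of printk behavior in kernels of this era): a message without a trailing '\n' is treated as a possible continuation line, so it can sit in the cont buffer, be flushed late, or be merged with an unrelated message printed afterwards. A hypothetical before/after snippet, mirroring one of the strings above (the helper name is invented for illustration):

#include <linux/printk.h>

/* Hypothetical helper, for illustration only -- not part of the patch. */
static void klp_report_bad_symbol(const char *name)
{
	/* Missing '\n': the log line stays open and may be joined
	 * with whatever message is printed next. */
	pr_err("symbol %s is not marked as a livepatch symbol", name);

	/* Trailing '\n' terminates the log record immediately. */
	pr_err("symbol %s is not marked as a livepatch symbol\n", name);
}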