Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/jikos/livepatching

Pull livepatching fix from Jiri Kosina:
 "Fix the way how livepatches are being stacked with respect to RCU,
  from Petr Mladek"

* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/jikos/livepatching:
  livepatch: Fix stacking of patches with respect to RCU
torvalds committed Jun 21, 2017
2 parents 021f601 + 842c088 commit dcba710
Showing 2 changed files with 37 additions and 7 deletions.
8 changes: 6 additions & 2 deletions kernel/livepatch/patch.c
@@ -59,7 +59,11 @@ static void notrace klp_ftrace_handler(unsigned long ip,
 
 	ops = container_of(fops, struct klp_ops, fops);
 
-	rcu_read_lock();
+	/*
+	 * A variant of synchronize_sched() is used to allow patching functions
+	 * where RCU is not watching, see klp_synchronize_transition().
+	 */
+	preempt_disable_notrace();
 
 	func = list_first_or_null_rcu(&ops->func_stack, struct klp_func,
 				      stack_node);
@@ -115,7 +119,7 @@ static void notrace klp_ftrace_handler(unsigned long ip,
 
 	klp_arch_set_pc(regs, (unsigned long)func->new_func);
 unlock:
-	rcu_read_unlock();
+	preempt_enable_notrace();
 }
 
 /*
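For context, a minimal reader-side sketch of the pattern the hunks above switch to (editorial illustration only, not part of the commit; demo_func, demo_func_stack and demo_pick_func are hypothetical stand-ins for struct klp_func, ops->func_stack and klp_ftrace_handler()): disabling preemption with the notrace variant pins the CPU for the duration of the func_stack walk, so the walk stays safe even in contexts where RCU is not watching, provided the writer waits for every CPU to schedule before freeing entries.

#include <linux/list.h>
#include <linux/preempt.h>
#include <linux/rculist.h>

/* Hypothetical stand-ins for struct klp_func and ops->func_stack. */
struct demo_func {
	struct list_head stack_node;
	unsigned long new_addr;
};

static LIST_HEAD(demo_func_stack);

/*
 * Reader side: preempt_disable_notrace() is usable inside an ftrace
 * handler and keeps this CPU from scheduling while the RCU-managed list
 * is walked, so a concurrent writer's schedule_on_each_cpu() barrier
 * cannot complete before we are done with the entry.
 */
static unsigned long demo_pick_func(void)
{
	struct demo_func *func;
	unsigned long addr = 0;

	preempt_disable_notrace();

	func = list_first_or_null_rcu(&demo_func_stack, struct demo_func,
				      stack_node);
	if (func)
		addr = func->new_addr;

	preempt_enable_notrace();

	return addr;
}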
36 changes: 31 additions & 5 deletions kernel/livepatch/transition.c
@@ -48,6 +48,28 @@ static void klp_transition_work_fn(struct work_struct *work)
 }
 static DECLARE_DELAYED_WORK(klp_transition_work, klp_transition_work_fn);
 
+/*
+ * This function is just a stub to implement a hard force
+ * of synchronize_sched(). This requires synchronizing
+ * tasks even in userspace and idle.
+ */
+static void klp_sync(struct work_struct *work)
+{
+}
+
+/*
+ * We allow to patch also functions where RCU is not watching,
+ * e.g. before user_exit(). We can not rely on the RCU infrastructure
+ * to do the synchronization. Instead hard force the sched synchronization.
+ *
+ * This approach allows to use RCU functions for manipulating func_stack
+ * safely.
+ */
+static void klp_synchronize_transition(void)
+{
+	schedule_on_each_cpu(klp_sync);
+}
+
 /*
  * The transition to the target patch state is complete. Clean up the data
  * structures.
@@ -73,7 +95,7 @@ static void klp_complete_transition(void)
 		 * func->transition gets cleared, the handler may choose a
 		 * removed function.
 		 */
-		synchronize_rcu();
+		klp_synchronize_transition();
 	}
 
 	if (klp_transition_patch->immediate)
@@ -92,7 +114,7 @@ static void klp_complete_transition(void)
 
 	/* Prevent klp_ftrace_handler() from seeing KLP_UNDEFINED state */
 	if (klp_target_state == KLP_PATCHED)
-		synchronize_rcu();
+		klp_synchronize_transition();
 
 	read_lock(&tasklist_lock);
 	for_each_process_thread(g, task) {
@@ -136,7 +158,11 @@ void klp_cancel_transition(void)
  */
 void klp_update_patch_state(struct task_struct *task)
 {
-	rcu_read_lock();
+	/*
+	 * A variant of synchronize_sched() is used to allow patching functions
+	 * where RCU is not watching, see klp_synchronize_transition().
+	 */
+	preempt_disable_notrace();
 
 	/*
 	 * This test_and_clear_tsk_thread_flag() call also serves as a read
@@ -153,7 +179,7 @@ void klp_update_patch_state(struct task_struct *task)
 	if (test_and_clear_tsk_thread_flag(task, TIF_PATCH_PENDING))
 		task->patch_state = READ_ONCE(klp_target_state);
 
-	rcu_read_unlock();
+	preempt_enable_notrace();
 }
 
 /*
@@ -539,7 +565,7 @@ void klp_reverse_transition(void)
 		clear_tsk_thread_flag(idle_task(cpu), TIF_PATCH_PENDING);
 
 	/* Let any remaining calls to klp_update_patch_state() complete */
-	synchronize_rcu();
+	klp_synchronize_transition();
 
 	klp_start_transition();
 }
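And the matching writer-side sketch (again hypothetical, reusing demo_func and demo_func_stack from the sketch above; the caller is assumed to serialize writers with a mutex, as livepatching does with klp_mutex): after the entry is unlinked, schedule_on_each_cpu() with an empty work function waits until every CPU has gone through the scheduler, acting as the hard synchronize_sched() that klp_synchronize_transition() provides, and only then is the entry freed.

#include <linux/rculist.h>
#include <linux/slab.h>
#include <linux/workqueue.h>

/* Empty work item: the wait inside schedule_on_each_cpu() is the point. */
static void demo_sync(struct work_struct *work)
{
}

/*
 * Writer side: unlink the top entry, force every CPU through the
 * scheduler so that no preempt-disabled reader can still hold a pointer
 * to it, then free it.
 */
static void demo_pop_func(void)
{
	struct demo_func *func;

	func = list_first_or_null_rcu(&demo_func_stack, struct demo_func,
				      stack_node);
	if (!func)
		return;

	list_del_rcu(&func->stack_node);
	schedule_on_each_cpu(demo_sync);	/* hard synchronize_sched() */
	kfree(func);
}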
