/*
 * transition.c - Kernel Live Patching transition functions
 *
 * Copyright (C) 2015-2016 Josh Poimboeuf <jpoimboe@redhat.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see <http://www.gnu.org/licenses/>.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/cpu.h>
#include <linux/stacktrace.h>
#include "core.h"
#include "patch.h"
#include "transition.h"
#include "../sched/sched.h"

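/*
 * Maximum number of stack trace entries examined per task, and the size of the
 * temporary buffer used to report why a task could not be switched (the
 * message is only printed after the task's rq lock has been dropped).
 */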
#define MAX_STACK_ENTRIES  100
#define STACK_ERR_BUF_SIZE 128

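/* The patch being transitioned, or NULL when no transition is in progress. */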
struct klp_patch *klp_transition_patch;

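/*
 * The global target patch state: KLP_PATCHED or KLP_UNPATCHED while a
 * transition is in progress, KLP_UNDEFINED otherwise.
 */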
static int klp_target_state = KLP_UNDEFINED;

/*
 * This work can be performed periodically to finish patching or unpatching any
 * "straggler" tasks which failed to transition in the first attempt.
 */
static void klp_transition_work_fn(struct work_struct *work)
{
	mutex_lock(&klp_mutex);

	if (klp_transition_patch)
		klp_try_complete_transition();

	mutex_unlock(&klp_mutex);
}
static DECLARE_DELAYED_WORK(klp_transition_work, klp_transition_work_fn);

/*
 * This function is just a stub to implement a hard force
 * of synchronize_rcu(). This requires synchronizing
 * tasks even in userspace and idle.
 */
static void klp_sync(struct work_struct *work)
{
}

/*
 * We also allow patching functions where RCU is not watching,
 * e.g. before user_exit(). We cannot rely on the RCU infrastructure
 * to do the synchronization. Instead, hard force the sched synchronization.
 *
 * This approach allows RCU functions to be used for manipulating func_stack
 * safely.
 */
static void klp_synchronize_transition(void)
{
	schedule_on_each_cpu(klp_sync);
}

/*
 * The transition to the target patch state is complete.  Clean up the data
 * structures.
 */
static void klp_complete_transition(void)
{
	struct klp_object *obj;
	struct klp_func *func;
	struct task_struct *g, *task;
	unsigned int cpu;

	pr_debug("'%s': completing %s transition\n",
		 klp_transition_patch->mod->name,
		 klp_target_state == KLP_PATCHED ? "patching" : "unpatching");

	if (klp_target_state == KLP_UNPATCHED) {
		/*
		 * All tasks have transitioned to KLP_UNPATCHED so we can now
		 * remove the new functions from the func_stack.
		 */
		klp_unpatch_objects(klp_transition_patch);

		/*
		 * Make sure klp_ftrace_handler() can no longer see functions
		 * from this patch on the ops->func_stack.  Otherwise, after
		 * func->transition gets cleared, the handler may choose a
		 * removed function.
		 */
		klp_synchronize_transition();
	}

	klp_for_each_object(klp_transition_patch, obj)
		klp_for_each_func(obj, func)
			func->transition = false;

	/* Prevent klp_ftrace_handler() from seeing KLP_UNDEFINED state */
	if (klp_target_state == KLP_PATCHED)
		klp_synchronize_transition();

	read_lock(&tasklist_lock);
	for_each_process_thread(g, task) {
		WARN_ON_ONCE(test_tsk_thread_flag(task, TIF_PATCH_PENDING));
		task->patch_state = KLP_UNDEFINED;
	}
	read_unlock(&tasklist_lock);

	for_each_possible_cpu(cpu) {
		task = idle_task(cpu);
		WARN_ON_ONCE(test_tsk_thread_flag(task, TIF_PATCH_PENDING));
		task->patch_state = KLP_UNDEFINED;
	}

	klp_for_each_object(klp_transition_patch, obj) {
		if (!klp_is_object_loaded(obj))
			continue;
		if (klp_target_state == KLP_PATCHED)
			klp_post_patch_callback(obj);
		else if (klp_target_state == KLP_UNPATCHED)
			klp_post_unpatch_callback(obj);
	}

	pr_notice("'%s': %s complete\n", klp_transition_patch->mod->name,
		  klp_target_state == KLP_PATCHED ? "patching" : "unpatching");

	/*
	 * A patch with ->forced set implies an unbounded increase of the
	 * module's ref count if the module is disabled/enabled in a loop.
	 */
	if (!klp_transition_patch->forced && klp_target_state == KLP_UNPATCHED)
		module_put(klp_transition_patch->mod);

	klp_target_state = KLP_UNDEFINED;
	klp_transition_patch = NULL;
}

/*
 * This is called in the error path, to cancel a transition before it has
 * started, i.e. klp_init_transition() has been called but
 * klp_start_transition() hasn't.  If the transition *has* been started,
 * klp_reverse_transition() should be used instead.
 */
void klp_cancel_transition(void)
{
	if (WARN_ON_ONCE(klp_target_state != KLP_PATCHED))
		return;

	pr_debug("'%s': canceling patching transition, going to unpatch\n",
		 klp_transition_patch->mod->name);

	klp_target_state = KLP_UNPATCHED;
	klp_complete_transition();
}

/*
 * Switch the patched state of the task to the set of functions in the target
 * patch state.
 *
 * NOTE: If task is not 'current', the caller must ensure the task is inactive.
 * Otherwise klp_ftrace_handler() might read the wrong 'patch_state' value.
 */
void klp_update_patch_state(struct task_struct *task)
{
	/*
	 * A variant of synchronize_rcu() is used to allow patching functions
	 * where RCU is not watching, see klp_synchronize_transition().
	 */
	preempt_disable_notrace();

	/*
	 * This test_and_clear_tsk_thread_flag() call also serves as a read
	 * barrier (smp_rmb) for two cases:
	 *
	 * 1) Enforce the order of the TIF_PATCH_PENDING read and the
	 *    klp_target_state read.  The corresponding write barrier is in
	 *    klp_init_transition().
	 *
	 * 2) Enforce the order of the TIF_PATCH_PENDING read and a future read
	 *    of func->transition, if klp_ftrace_handler() is called later on
	 *    the same CPU.  See __klp_disable_patch().
	 */
	if (test_and_clear_tsk_thread_flag(task, TIF_PATCH_PENDING))
		task->patch_state = READ_ONCE(klp_target_state);

	preempt_enable_notrace();
}

/*
 * Determine whether the given stack trace includes any references to a
 * to-be-patched or to-be-unpatched function.
 */
static int klp_check_stack_func(struct klp_func *func,
				struct stack_trace *trace)
{
	unsigned long func_addr, func_size, address;
	struct klp_ops *ops;
	int i;

	for (i = 0; i < trace->nr_entries; i++) {
		address = trace->entries[i];

		if (klp_target_state == KLP_UNPATCHED) {
			/*
			 * Check for the to-be-unpatched function
			 * (the func itself).
			 */
			func_addr = (unsigned long)func->new_func;
			func_size = func->new_size;
		} else {
			/*
			 * Check for the to-be-patched function
			 * (the previous func).
			 */
			ops = klp_find_ops(func->old_func);

			if (list_is_singular(&ops->func_stack)) {
				/* original function */
				func_addr = (unsigned long)func->old_func;
				func_size = func->old_size;
			} else {
				/* previously patched function */
				struct klp_func *prev;

				prev = list_next_entry(func, stack_node);
				func_addr = (unsigned long)prev->new_func;
				func_size = prev->new_size;
			}
		}

		if (address >= func_addr && address < func_addr + func_size)
			return -EAGAIN;
	}

	return 0;
}

/*
 * Determine whether it's safe to transition the task to the target patch state
 * by looking for any to-be-patched or to-be-unpatched functions on its stack.
 */
static int klp_check_stack(struct task_struct *task, char *err_buf)
{
	static unsigned long entries[MAX_STACK_ENTRIES];
	struct stack_trace trace;
	struct klp_object *obj;
	struct klp_func *func;
	int ret;

	trace.skip = 0;
	trace.nr_entries = 0;
	trace.max_entries = MAX_STACK_ENTRIES;
	trace.entries = entries;
	ret = save_stack_trace_tsk_reliable(task, &trace);
	WARN_ON_ONCE(ret == -ENOSYS);
	if (ret) {
		snprintf(err_buf, STACK_ERR_BUF_SIZE,
			 "%s: %s:%d has an unreliable stack\n",
			 __func__, task->comm, task->pid);
		return ret;
	}

	klp_for_each_object(klp_transition_patch, obj) {
		if (!obj->patched)
			continue;
		klp_for_each_func(obj, func) {
			ret = klp_check_stack_func(func, &trace);
			if (ret) {
				snprintf(err_buf, STACK_ERR_BUF_SIZE,
					 "%s: %s:%d is sleeping on function %s\n",
					 __func__, task->comm, task->pid,
					 func->old_name);
				return ret;
			}
		}
	}

	return 0;
}

/*
 * Try to safely switch a task to the target patch state.  If it's currently
 * running, or it's sleeping on a to-be-patched or to-be-unpatched function, or
 * if the stack is unreliable, return false.
 */
static bool klp_try_switch_task(struct task_struct *task)
{
	struct rq *rq;
	struct rq_flags flags;
	int ret;
	bool success = false;
	char err_buf[STACK_ERR_BUF_SIZE];

	err_buf[0] = '\0';

	/* check if this task has already switched over */
	if (task->patch_state == klp_target_state)
		return true;

	/*
	 * Now try to check the stack for any to-be-patched or to-be-unpatched
	 * functions.  If all goes well, switch the task to the target patch
	 * state.
	 */
	rq = task_rq_lock(task, &flags);

	if (task_running(rq, task) && task != current) {
		snprintf(err_buf, STACK_ERR_BUF_SIZE,
			 "%s: %s:%d is running\n", __func__, task->comm,
			 task->pid);
		goto done;
	}

	ret = klp_check_stack(task, err_buf);
	if (ret)
		goto done;

	success = true;

	clear_tsk_thread_flag(task, TIF_PATCH_PENDING);
	task->patch_state = klp_target_state;

done:
	task_rq_unlock(rq, task, &flags);

	/*
	 * Due to console deadlock issues, pr_debug() can't be used while
	 * holding the task rq lock.  Instead we have to use a temporary buffer
	 * and print the debug message after releasing the lock.
	 */
	if (err_buf[0] != '\0')
		pr_debug("%s", err_buf);

	return success;
}

/*
 * Try to switch all remaining tasks to the target patch state by walking the
 * stacks of sleeping tasks and looking for any to-be-patched or
 * to-be-unpatched functions.  If such functions are found, the task can't be
 * switched yet.
 *
 * If any tasks are still stuck in the initial patch state, schedule a retry.
 */
void klp_try_complete_transition(void)
{
	unsigned int cpu;
	struct task_struct *g, *task;
	bool complete = true;

	WARN_ON_ONCE(klp_target_state == KLP_UNDEFINED);

	/*
	 * Try to switch the tasks to the target patch state by walking their
	 * stacks and looking for any to-be-patched or to-be-unpatched
	 * functions.  If such functions are found on a stack, or if the stack
	 * is deemed unreliable, the task can't be switched yet.
	 *
	 * Usually this will transition most (or all) of the tasks on a system
	 * unless the patch includes changes to a very common function.
	 */
	read_lock(&tasklist_lock);
	for_each_process_thread(g, task)
		if (!klp_try_switch_task(task))
			complete = false;
	read_unlock(&tasklist_lock);

	/*
	 * Ditto for the idle "swapper" tasks.
	 */
	get_online_cpus();
	for_each_possible_cpu(cpu) {
		task = idle_task(cpu);
		if (cpu_online(cpu)) {
			if (!klp_try_switch_task(task))
				complete = false;
		} else if (task->patch_state != klp_target_state) {
			/* offline idle tasks can be switched immediately */
			clear_tsk_thread_flag(task, TIF_PATCH_PENDING);
			task->patch_state = klp_target_state;
		}
	}
	put_online_cpus();

	if (!complete) {
		/*
		 * Some tasks weren't able to be switched over.  Try again
		 * later and/or wait for other methods like kernel exit
		 * switching.
		 */
		schedule_delayed_work(&klp_transition_work,
				      round_jiffies_relative(HZ));
		return;
	}

	/* we're done, now cleanup the data structures */
	klp_complete_transition();
}

/*
 * Start the transition to the specified target patch state so tasks can begin
 * switching to it.
 */
void klp_start_transition(void)
{
	struct task_struct *g, *task;
	unsigned int cpu;

	WARN_ON_ONCE(klp_target_state == KLP_UNDEFINED);

	pr_notice("'%s': starting %s transition\n",
		  klp_transition_patch->mod->name,
		  klp_target_state == KLP_PATCHED ? "patching" : "unpatching");

	/*
	 * Mark all normal tasks as needing a patch state update.  They'll
	 * switch either in klp_try_complete_transition() or as they exit the
	 * kernel.
	 */
	read_lock(&tasklist_lock);
	for_each_process_thread(g, task)
		if (task->patch_state != klp_target_state)
			set_tsk_thread_flag(task, TIF_PATCH_PENDING);
	read_unlock(&tasklist_lock);

	/*
	 * Mark all idle tasks as needing a patch state update.  They'll switch
	 * either in klp_try_complete_transition() or at the idle loop switch
	 * point.
	 */
	for_each_possible_cpu(cpu) {
		task = idle_task(cpu);
		if (task->patch_state != klp_target_state)
			set_tsk_thread_flag(task, TIF_PATCH_PENDING);
	}
}

/*
 * Initialize the global target patch state and all tasks to the initial patch
 * state, and initialize all function transition states to true in preparation
 * for patching or unpatching.
 */
void klp_init_transition(struct klp_patch *patch, int state)
{
	struct task_struct *g, *task;
	unsigned int cpu;
	struct klp_object *obj;
	struct klp_func *func;
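	/* KLP_UNPATCHED and KLP_PATCHED are boolean complements, hence the negation */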
	int initial_state = !state;

	WARN_ON_ONCE(klp_target_state != KLP_UNDEFINED);

	klp_transition_patch = patch;

	/*
	 * Set the global target patch state which tasks will switch to.  This
	 * has no effect until the TIF_PATCH_PENDING flags get set later.
	 */
	klp_target_state = state;

	pr_debug("'%s': initializing %s transition\n", patch->mod->name,
		 klp_target_state == KLP_PATCHED ? "patching" : "unpatching");

	/*
	 * Initialize all tasks to the initial patch state to prepare them for
	 * switching to the target state.
	 */
	read_lock(&tasklist_lock);
	for_each_process_thread(g, task) {
		WARN_ON_ONCE(task->patch_state != KLP_UNDEFINED);
		task->patch_state = initial_state;
	}
	read_unlock(&tasklist_lock);

	/*
	 * Ditto for the idle "swapper" tasks.
	 */
	for_each_possible_cpu(cpu) {
		task = idle_task(cpu);
		WARN_ON_ONCE(task->patch_state != KLP_UNDEFINED);
		task->patch_state = initial_state;
	}

	/*
	 * Enforce the order of the task->patch_state initializations and the
	 * func->transition updates to ensure that klp_ftrace_handler() doesn't
	 * see a func in transition with a task->patch_state of KLP_UNDEFINED.
	 *
	 * Also enforce the order of the klp_target_state write and future
	 * TIF_PATCH_PENDING writes to ensure klp_update_patch_state() doesn't
	 * set a task->patch_state to KLP_UNDEFINED.
	 */
	smp_wmb();

	/*
	 * Set the func transition states so klp_ftrace_handler() will know to
	 * switch to the transition logic.
	 *
	 * When patching, the funcs aren't yet in the func_stack and will be
	 * made visible to the ftrace handler shortly by the calls to
	 * klp_patch_object().
	 *
	 * When unpatching, the funcs are already in the func_stack and so are
	 * already visible to the ftrace handler.
	 */
	klp_for_each_object(patch, obj)
		klp_for_each_func(obj, func)
			func->transition = true;
}

/*
 * This function can be called in the middle of an existing transition to
 * reverse the direction of the target patch state.  This can be done to
 * effectively cancel an existing enable or disable operation if there are any
 * tasks which are stuck in the initial patch state.
 */
void klp_reverse_transition(void)
{
	unsigned int cpu;
	struct task_struct *g, *task;

	pr_debug("'%s': reversing transition from %s\n",
		 klp_transition_patch->mod->name,
		 klp_target_state == KLP_PATCHED ? "patching to unpatching" :
						   "unpatching to patching");

	klp_transition_patch->enabled = !klp_transition_patch->enabled;

	klp_target_state = !klp_target_state;

	/*
	 * Clear all TIF_PATCH_PENDING flags to prevent races caused by
	 * klp_update_patch_state() running in parallel with
	 * klp_start_transition().
	 */
	read_lock(&tasklist_lock);
	for_each_process_thread(g, task)
		clear_tsk_thread_flag(task, TIF_PATCH_PENDING);
	read_unlock(&tasklist_lock);

	for_each_possible_cpu(cpu)
		clear_tsk_thread_flag(idle_task(cpu), TIF_PATCH_PENDING);

	/* Let any remaining calls to klp_update_patch_state() complete */
	klp_synchronize_transition();

	klp_start_transition();
}

/* Called from copy_process() during fork */
void klp_copy_process(struct task_struct *child)
{
	child->patch_state = current->patch_state;

	/* TIF_PATCH_PENDING gets copied in setup_thread_stack() */
}

/*
 * Sends a fake signal to all non-kthread tasks with TIF_PATCH_PENDING set.
 * Kthreads with TIF_PATCH_PENDING set are woken up.  Only the admin can
 * currently request this action.
 */
void klp_send_signals(void)
{
	struct task_struct *g, *task;

	pr_notice("signaling remaining tasks\n");

	read_lock(&tasklist_lock);
	for_each_process_thread(g, task) {
		if (!klp_patch_pending(task))
			continue;

		/*
		 * There is a small race here.  We could see TIF_PATCH_PENDING
		 * set and decide to wake up a kthread or send a fake signal.
		 * Meanwhile the task could migrate itself and the action
		 * would be meaningless.  It is not serious though.
		 */
		if (task->flags & PF_KTHREAD) {
			/*
			 * Wake up a kthread which sleeps interruptibly and
			 * still has not been migrated.
			 */
			wake_up_state(task, TASK_INTERRUPTIBLE);
		} else {
			/*
			 * Send a fake signal to all non-kthread tasks which
			 * are still not migrated.
			 */
			spin_lock_irq(&task->sighand->siglock);
			signal_wake_up(task, 0);
			spin_unlock_irq(&task->sighand->siglock);
		}
	}
	read_unlock(&tasklist_lock);
}

/*
 * Drop TIF_PATCH_PENDING of all tasks on admin's request.  This forces an
 * existing transition to finish.
 *
 * NOTE: klp_update_patch_state(task) requires the task to be inactive or
 * 'current'.  This is not the case here and the consistency model could be
 * broken.  The administrator, who is the only one allowed to execute
 * klp_force_transition(), has to be aware of this.
 */
void klp_force_transition(void)
{
	struct klp_patch *patch;
	struct task_struct *g, *task;
	unsigned int cpu;

	pr_warn("forcing remaining tasks to the patched state\n");

	read_lock(&tasklist_lock);
	for_each_process_thread(g, task)
		klp_update_patch_state(task);
	read_unlock(&tasklist_lock);

	for_each_possible_cpu(cpu)
		klp_update_patch_state(idle_task(cpu));

	list_for_each_entry(patch, &klp_patches, list)
		patch->forced = true;
}