From bbda86e988d4c124e4cfa816291cbd583ae8bfb1 Mon Sep 17 00:00:00 2001 From: "Eric W. Biederman" Date: Mon, 22 Nov 2021 10:27:36 -0600 Subject: exit: Implement kthread_exit The way the per task_struct exit_code is used by kernel threads is not quite compatible with how it is used by userspace applications. The low byte of the userspace exit_code value encodes the exit signal, while kthreads just use the value as an int holding an ordinary kernel function exit status like -EPERM. Add kthread_exit to clearly separate the two kinds of uses. Signed-off-by: "Eric W. Biederman" --- kernel/kthread.c | 23 +++++++++++++++++++---- 1 file changed, 19 insertions(+), 4 deletions(-) (limited to 'kernel/kthread.c') diff --git a/kernel/kthread.c b/kernel/kthread.c index 7113003fab63..77b7c3f23f18 100644 --- a/kernel/kthread.c +++ b/kernel/kthread.c @@ -268,6 +268,21 @@ void kthread_parkme(void) } EXPORT_SYMBOL_GPL(kthread_parkme); +/** + * kthread_exit - Cause the current kthread return @result to kthread_stop(). + * @result: The integer value to return to kthread_stop(). + * + * While kthread_exit can be called directly, it exists so that + * functions which do some additional work in non-modular code such as + * module_put_and_kthread_exit can be implemented. + * + * Does not return. + */ +void __noreturn kthread_exit(long result) +{ + do_exit(result); +} + static int kthread(void *_create) { static const struct sched_param param = { .sched_priority = 0 }; @@ -286,13 +301,13 @@ static int kthread(void *_create) done = xchg(&create->done, NULL); if (!done) { kfree(create); - do_exit(-EINTR); + kthread_exit(-EINTR); } if (!self) { create->result = ERR_PTR(-ENOMEM); complete(done); - do_exit(-ENOMEM); + kthread_exit(-ENOMEM); } self->threadfn = threadfn; @@ -326,7 +341,7 @@ static int kthread(void *_create) __kthread_parkme(self); ret = threadfn(data); } - do_exit(ret); + kthread_exit(ret); } /* called from kernel_clone() to get node information for about to be created task */ @@ -627,7 +642,7 @@ EXPORT_SYMBOL_GPL(kthread_park); * instead of calling wake_up_process(): the thread will exit without * calling threadfn(). * - * If threadfn() may call do_exit() itself, the caller must ensure + * If threadfn() may call kthread_exit() itself, the caller must ensure * task_struct can't go away. * * Returns the result of threadfn(), or %-EINTR if wake_up_process() -- cgit v1.2.3 From cead18552660702a4a46f58e65188fe5f36e9dfe Mon Sep 17 00:00:00 2001 From: "Eric W. Biederman" Date: Mon, 22 Nov 2021 11:15:19 -0600 Subject: exit: Rename complete_and_exit to kthread_complete_and_exit Update complete_and_exit to call kthread_exit instead of do_exit. Change the name to reflect this change in functionality. All of the users of complete_and_exit are causing the current kthread to exit, so this change makes it clear what is happening. Move the implementation of kthread_complete_and_exit from kernel/exit.c to kernel/kthread.c. As this function is kthread specific it makes most sense to live with the kthread functions. There is no functional change. Signed-off-by: "Eric W.
Biederman" --- drivers/net/wireless/rsi/rsi_91x_coex.c | 2 +- drivers/net/wireless/rsi/rsi_91x_main.c | 2 +- drivers/net/wireless/rsi/rsi_91x_sdio_ops.c | 2 +- drivers/net/wireless/rsi/rsi_91x_usb_ops.c | 2 +- drivers/pnp/pnpbios/core.c | 6 +++--- drivers/staging/rts5208/rtsx.c | 16 ++++++++-------- drivers/usb/atm/usbatm.c | 2 +- drivers/usb/gadget/function/f_mass_storage.c | 2 +- fs/jffs2/background.c | 2 +- include/linux/kernel.h | 1 - include/linux/kthread.h | 1 + kernel/exit.c | 9 --------- kernel/kthread.c | 21 +++++++++++++++++++++ lib/kunit/try-catch.c | 4 ++-- tools/objtool/check.c | 2 +- 15 files changed, 43 insertions(+), 31 deletions(-) (limited to 'kernel/kthread.c') diff --git a/drivers/net/wireless/rsi/rsi_91x_coex.c b/drivers/net/wireless/rsi/rsi_91x_coex.c index a0c5d02ae88c..8a3d86897ea8 100644 --- a/drivers/net/wireless/rsi/rsi_91x_coex.c +++ b/drivers/net/wireless/rsi/rsi_91x_coex.c @@ -63,7 +63,7 @@ static void rsi_coex_scheduler_thread(struct rsi_common *common) rsi_coex_sched_tx_pkts(coex_cb); } while (atomic_read(&coex_cb->coex_tx_thread.thread_done) == 0); - complete_and_exit(&coex_cb->coex_tx_thread.completion, 0); + kthread_complete_and_exit(&coex_cb->coex_tx_thread.completion, 0); } int rsi_coex_recv_pkt(struct rsi_common *common, u8 *msg) diff --git a/drivers/net/wireless/rsi/rsi_91x_main.c b/drivers/net/wireless/rsi/rsi_91x_main.c index f1bf71e6c608..c7f5cec5e446 100644 --- a/drivers/net/wireless/rsi/rsi_91x_main.c +++ b/drivers/net/wireless/rsi/rsi_91x_main.c @@ -260,7 +260,7 @@ static void rsi_tx_scheduler_thread(struct rsi_common *common) if (common->init_done) rsi_core_qos_processor(common); } while (atomic_read(&common->tx_thread.thread_done) == 0); - complete_and_exit(&common->tx_thread.completion, 0); + kthread_complete_and_exit(&common->tx_thread.completion, 0); } #ifdef CONFIG_RSI_COEX diff --git a/drivers/net/wireless/rsi/rsi_91x_sdio_ops.c b/drivers/net/wireless/rsi/rsi_91x_sdio_ops.c index 8ace1874e5cb..b2b47a0abcbf 100644 --- a/drivers/net/wireless/rsi/rsi_91x_sdio_ops.c +++ b/drivers/net/wireless/rsi/rsi_91x_sdio_ops.c @@ -75,7 +75,7 @@ void rsi_sdio_rx_thread(struct rsi_common *common) rsi_dbg(INFO_ZONE, "%s: Terminated SDIO RX thread\n", __func__); atomic_inc(&sdev->rx_thread.thread_done); - complete_and_exit(&sdev->rx_thread.completion, 0); + kthread_complete_and_exit(&sdev->rx_thread.completion, 0); } /** diff --git a/drivers/net/wireless/rsi/rsi_91x_usb_ops.c b/drivers/net/wireless/rsi/rsi_91x_usb_ops.c index 4ffcdde1acb1..5130b0e72adc 100644 --- a/drivers/net/wireless/rsi/rsi_91x_usb_ops.c +++ b/drivers/net/wireless/rsi/rsi_91x_usb_ops.c @@ -56,6 +56,6 @@ void rsi_usb_rx_thread(struct rsi_common *common) out: rsi_dbg(INFO_ZONE, "%s: Terminated thread\n", __func__); skb_queue_purge(&dev->rx_q); - complete_and_exit(&dev->rx_thread.completion, 0); + kthread_complete_and_exit(&dev->rx_thread.completion, 0); } diff --git a/drivers/pnp/pnpbios/core.c b/drivers/pnp/pnpbios/core.c index 669ef4700c1a..f7e86ae9f72f 100644 --- a/drivers/pnp/pnpbios/core.c +++ b/drivers/pnp/pnpbios/core.c @@ -160,7 +160,7 @@ static int pnp_dock_thread(void *unused) * No dock to manage */ case PNP_FUNCTION_NOT_SUPPORTED: - complete_and_exit(&unload_sem, 0); + kthread_complete_and_exit(&unload_sem, 0); case PNP_SYSTEM_NOT_DOCKED: d = 0; break; @@ -170,7 +170,7 @@ static int pnp_dock_thread(void *unused) default: pnpbios_print_status("pnp_dock_thread", status); printk(KERN_WARNING "PnPBIOS: disabling dock monitoring.\n"); - complete_and_exit(&unload_sem, 0); + 
kthread_complete_and_exit(&unload_sem, 0); } if (d != docked) { if (pnp_dock_event(d, &now) == 0) { @@ -183,7 +183,7 @@ static int pnp_dock_thread(void *unused) } } } - complete_and_exit(&unload_sem, 0); + kthread_complete_and_exit(&unload_sem, 0); } static int pnpbios_get_resources(struct pnp_dev *dev) diff --git a/drivers/staging/rts5208/rtsx.c b/drivers/staging/rts5208/rtsx.c index 91fcf85e150a..5a58dac76c88 100644 --- a/drivers/staging/rts5208/rtsx.c +++ b/drivers/staging/rts5208/rtsx.c @@ -450,13 +450,13 @@ skip_for_abort: * after the down() -- that's necessary for the thread-shutdown * case. * - * complete_and_exit() goes even further than this -- it is safe in - * the case that the thread of the caller is going away (not just - * the structure) -- this is necessary for the module-remove case. - * This is important in preemption kernels, which transfer the flow - * of execution immediately upon a complete(). + * kthread_complete_and_exit() goes even further than this -- + * it is safe in the case that the thread of the caller is going away + * (not just the structure) -- this is necessary for the module-remove + * case. This is important in preemption kernels, which transfer the + * flow of execution immediately upon a complete(). */ - complete_and_exit(&dev->control_exit, 0); + kthread_complete_and_exit(&dev->control_exit, 0); } static int rtsx_polling_thread(void *__dev) @@ -501,7 +501,7 @@ static int rtsx_polling_thread(void *__dev) mutex_unlock(&dev->dev_mutex); } - complete_and_exit(&dev->polling_exit, 0); + kthread_complete_and_exit(&dev->polling_exit, 0); } /* @@ -682,7 +682,7 @@ static int rtsx_scan_thread(void *__dev) /* Should we unbind if no devices were detected? */ } - complete_and_exit(&dev->scanning_done, 0); + kthread_complete_and_exit(&dev->scanning_done, 0); } static void rtsx_init_options(struct rtsx_chip *chip) diff --git a/drivers/usb/atm/usbatm.c b/drivers/usb/atm/usbatm.c index da17be1ef64e..e3a49d837609 100644 --- a/drivers/usb/atm/usbatm.c +++ b/drivers/usb/atm/usbatm.c @@ -969,7 +969,7 @@ static int usbatm_do_heavy_init(void *arg) instance->thread = NULL; mutex_unlock(&instance->serialize); - complete_and_exit(&instance->thread_exited, ret); + kthread_complete_and_exit(&instance->thread_exited, ret); } static int usbatm_heavy_init(struct usbatm_data *instance) diff --git a/drivers/usb/gadget/function/f_mass_storage.c b/drivers/usb/gadget/function/f_mass_storage.c index 752439690fda..46dd11dcb3a8 100644 --- a/drivers/usb/gadget/function/f_mass_storage.c +++ b/drivers/usb/gadget/function/f_mass_storage.c @@ -2547,7 +2547,7 @@ static int fsg_main_thread(void *common_) up_write(&common->filesem); /* Let fsg_unbind() know the thread has exited */ - complete_and_exit(&common->thread_notifier, 0); + kthread_complete_and_exit(&common->thread_notifier, 0); } diff --git a/fs/jffs2/background.c b/fs/jffs2/background.c index 2b4d5013dc5d..6da92ecaf66d 100644 --- a/fs/jffs2/background.c +++ b/fs/jffs2/background.c @@ -161,5 +161,5 @@ static int jffs2_garbage_collect_thread(void *_c) spin_lock(&c->erase_completion_lock); c->gc_task = NULL; spin_unlock(&c->erase_completion_lock); - complete_and_exit(&c->gc_thread_exit, 0); + kthread_complete_and_exit(&c->gc_thread_exit, 0); } diff --git a/include/linux/kernel.h b/include/linux/kernel.h index 77755ac3e189..055eb203c00e 100644 --- a/include/linux/kernel.h +++ b/include/linux/kernel.h @@ -187,7 +187,6 @@ static inline void might_fault(void) { } #endif void do_exit(long error_code) __noreturn; -void complete_and_exit(struct 
completion *, long) __noreturn; extern int num_to_str(char *buf, int size, unsigned long long num, unsigned int width); diff --git a/include/linux/kthread.h b/include/linux/kthread.h index 22c43d419687..d86a7e3b9a52 100644 --- a/include/linux/kthread.h +++ b/include/linux/kthread.h @@ -71,6 +71,7 @@ int kthread_park(struct task_struct *k); void kthread_unpark(struct task_struct *k); void kthread_parkme(void); void kthread_exit(long result) __noreturn; +void kthread_complete_and_exit(struct completion *, long) __noreturn; int kthreadd(void *unused); extern struct task_struct *kthreadd_task; diff --git a/kernel/exit.c b/kernel/exit.c index 57afac845a0a..6c4b04531f17 100644 --- a/kernel/exit.c +++ b/kernel/exit.c @@ -891,15 +891,6 @@ void __noreturn make_task_dead(int signr) do_exit(signr); } -void complete_and_exit(struct completion *comp, long code) -{ - if (comp) - complete(comp); - - do_exit(code); -} -EXPORT_SYMBOL(complete_and_exit); - SYSCALL_DEFINE1(exit, int, error_code) { do_exit((error_code&0xff)<<8); diff --git a/kernel/kthread.c b/kernel/kthread.c index 77b7c3f23f18..4388d6694a7f 100644 --- a/kernel/kthread.c +++ b/kernel/kthread.c @@ -283,6 +283,27 @@ void __noreturn kthread_exit(long result) do_exit(result); } +/** + * kthread_complete_and exit - Exit the current kthread. + * @comp: Completion to complete + * @code: The integer value to return to kthread_stop(). + * + * If present complete @comp and the reuturn code to kthread_stop(). + * + * A kernel thread whose module may be removed after the completion of + * @comp can use this function exit safely. + * + * Does not return. + */ +void __noreturn kthread_complete_and_exit(struct completion *comp, long code) +{ + if (comp) + complete(comp); + + kthread_exit(code); +} +EXPORT_SYMBOL(kthread_complete_and_exit); + static int kthread(void *_create) { static const struct sched_param param = { .sched_priority = 0 }; diff --git a/lib/kunit/try-catch.c b/lib/kunit/try-catch.c index 0dd434e40487..be38a2c5ecc2 100644 --- a/lib/kunit/try-catch.c +++ b/lib/kunit/try-catch.c @@ -17,7 +17,7 @@ void __noreturn kunit_try_catch_throw(struct kunit_try_catch *try_catch) { try_catch->try_result = -EFAULT; - complete_and_exit(try_catch->try_completion, -EFAULT); + kthread_complete_and_exit(try_catch->try_completion, -EFAULT); } EXPORT_SYMBOL_GPL(kunit_try_catch_throw); @@ -27,7 +27,7 @@ static int kunit_generic_run_threadfn_adapter(void *data) try_catch->try(try_catch->context); - complete_and_exit(try_catch->try_completion, 0); + kthread_complete_and_exit(try_catch->try_completion, 0); } static unsigned long kunit_test_timeout(void) diff --git a/tools/objtool/check.c b/tools/objtool/check.c index 120e9598c11a..282273a1ffa5 100644 --- a/tools/objtool/check.c +++ b/tools/objtool/check.c @@ -171,7 +171,7 @@ static bool __dead_end_function(struct objtool_file *file, struct symbol *func, "kthread_exit", "make_task_dead", "__module_put_and_kthread_exit", - "complete_and_exit", + "kthread_complete_and_exit", "__reiserfs_panic", "lbug_with_loc", "fortify_panic", -- cgit v1.2.3 From 40966e316f86b8cfd83abd31ccb4df729309d3e7 Mon Sep 17 00:00:00 2001 From: "Eric W. Biederman" Date: Thu, 2 Dec 2021 09:56:14 -0600 Subject: kthread: Ensure struct kthread is present for all kthreads Today the rules are a bit iffy and arbitrary about which kernel threads have struct kthread present. Both idle threads and thread started with create_kthread want struct kthread present so that is effectively all kernel threads. 
Make the rule that if PF_KTHREAD is set and the task is running, then struct kthread is present. This will allow the kernel thread code to use tsk->exit_code with different semantics from ordinary processes. To ensure that struct kthread is present for all kernel threads, move its allocation into copy_process. Add a deallocation of struct kthread in exec for processes that were kernel threads. Move the allocation of struct kthread for the initial thread earlier so that it is not repeated for each additional idle thread. Move the initialization of struct kthread into set_kthread_struct so that the structure is always and reliably initialized. Clear set_child_tid in free_kthread_struct to ensure the kthread struct is reliably freed during exec. The function free_kthread_struct does not need to clear vfork_done during exec as exec_mm_release called from exec_mmap has already cleared vfork_done. Signed-off-by: "Eric W. Biederman" --- fs/exec.c | 2 ++ include/linux/kthread.h | 2 +- kernel/fork.c | 4 ++++ kernel/kthread.c | 31 ++++++++++++++----------------- kernel/sched/core.c | 16 ++++++++-------- 5 files changed, 29 insertions(+), 26 deletions(-) (limited to 'kernel/kthread.c') diff --git a/fs/exec.c b/fs/exec.c index 537d92c41105..59cac7c18178 100644 --- a/fs/exec.c +++ b/fs/exec.c @@ -1307,6 +1307,8 @@ int begin_new_exec(struct linux_binprm * bprm) */ force_uaccess_begin(); + if (me->flags & PF_KTHREAD) + free_kthread_struct(me); me->flags &= ~(PF_RANDOMIZE | PF_FORKNOEXEC | PF_KTHREAD | PF_NOFREEZE | PF_NO_SETAFFINITY); flush_thread(); diff --git a/include/linux/kthread.h b/include/linux/kthread.h index d86a7e3b9a52..4f3433afb54b 100644 --- a/include/linux/kthread.h +++ b/include/linux/kthread.h @@ -33,7 +33,7 @@ struct task_struct *kthread_create_on_cpu(int (*threadfn)(void *data), unsigned int cpu, const char *namefmt); -void set_kthread_struct(struct task_struct *p); +bool set_kthread_struct(struct task_struct *p); void kthread_set_per_cpu(struct task_struct *k, int cpu); bool kthread_is_per_cpu(struct task_struct *k); diff --git a/kernel/fork.c b/kernel/fork.c index 3244cc56b697..04fa3e5d97af 100644 --- a/kernel/fork.c +++ b/kernel/fork.c @@ -2118,6 +2118,10 @@ static __latent_entropy struct task_struct *copy_process( p->io_context = NULL; audit_set_context(p, NULL); cgroup_fork(p); + if (p->flags & PF_KTHREAD) { + if (!set_kthread_struct(p)) + goto bad_fork_cleanup_threadgroup_lock; + } #ifdef CONFIG_NUMA p->mempolicy = mpol_dup(p->mempolicy); if (IS_ERR(p->mempolicy)) { diff --git a/kernel/kthread.c b/kernel/kthread.c index 4388d6694a7f..8e5f44bed027 100644 --- a/kernel/kthread.c +++ b/kernel/kthread.c @@ -93,20 +93,27 @@ static inline struct kthread *__to_kthread(struct task_struct *p) return kthread; } -void set_kthread_struct(struct task_struct *p) +bool set_kthread_struct(struct task_struct *p) { struct kthread *kthread; - if (__to_kthread(p)) - return; + if (WARN_ON_ONCE(to_kthread(p))) + return false; kthread = kzalloc(sizeof(*kthread), GFP_KERNEL); + if (!kthread) + return false; + + init_completion(&kthread->exited); + init_completion(&kthread->parked); + p->vfork_done = &kthread->exited; + /* * We abuse ->set_child_tid to avoid the new member and because it - * can't be wrongly copied by copy_process(). We also rely on fact - * that the caller can't exec, so PF_KTHREAD can't be cleared. + * can't be wrongly copied by copy_process().
*/ p->set_child_tid = (__force void __user *)kthread; + return true; } void free_kthread_struct(struct task_struct *k) @@ -114,13 +121,13 @@ void free_kthread_struct(struct task_struct *k) struct kthread *kthread; /* - * Can be NULL if this kthread was created by kernel_thread() - * or if kmalloc() in kthread() failed. + * Can be NULL if kmalloc() in set_kthread_struct() failed. */ kthread = to_kthread(k); #ifdef CONFIG_BLK_CGROUP WARN_ON_ONCE(kthread && kthread->blkcg_css); #endif + k->set_child_tid = (__force void __user *)NULL; kfree(kthread); } @@ -315,7 +322,6 @@ static int kthread(void *_create) struct kthread *self; int ret; - set_kthread_struct(current); self = to_kthread(current); /* If user was SIGKILLed, I release the structure. */ @@ -325,17 +331,8 @@ static int kthread(void *_create) kthread_exit(-EINTR); } - if (!self) { - create->result = ERR_PTR(-ENOMEM); - complete(done); - kthread_exit(-ENOMEM); - } - self->threadfn = threadfn; self->data = data; - init_completion(&self->exited); - init_completion(&self->parked); - current->vfork_done = &self->exited; /* * The new thread inherited kthreadd's priority and CPU mask. Reset diff --git a/kernel/sched/core.c b/kernel/sched/core.c index 3c9b0fda64ac..0404a8c572a1 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c @@ -8599,14 +8599,6 @@ void __init init_idle(struct task_struct *idle, int cpu) __sched_fork(0, idle); - /* - * The idle task doesn't need the kthread struct to function, but it - * is dressed up as a per-CPU kthread and thus needs to play the part - * if we want to avoid special-casing it in code that deals with per-CPU - * kthreads. - */ - set_kthread_struct(idle); - raw_spin_lock_irqsave(&idle->pi_lock, flags); raw_spin_rq_lock(rq); @@ -9427,6 +9419,14 @@ void __init sched_init(void) mmgrab(&init_mm); enter_lazy_tlb(&init_mm, current); + /* + * The idle task doesn't need the kthread struct to function, but it + * is dressed up as a per-CPU kthread and thus needs to play the part + * if we want to avoid special-casing it in code that deals with per-CPU + * kthreads. + */ + WARN_ON(set_kthread_struct(current)); + /* * Make us the idle thread. Technically, schedule() should not be * called from this thread, however somewhere below it might be, -- cgit v1.2.3 From 6b1248798eb6f6d5285db214299996ecc5dc1e6b Mon Sep 17 00:00:00 2001 From: "Eric W. Biederman" Date: Fri, 3 Dec 2021 11:42:49 -0600 Subject: exit/kthread: Move the exit code for kernel threads into struct kthread The exit code of kernel threads has different semantics than the exit_code of userspace tasks. To avoid confusion and allow the userspace implementation to change as needed move the kernel thread exit code into struct kthread. Signed-off-by: "Eric W. 
Biederman" --- kernel/kthread.c | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) (limited to 'kernel/kthread.c') diff --git a/kernel/kthread.c b/kernel/kthread.c index 8e5f44bed027..9c6c532047c4 100644 --- a/kernel/kthread.c +++ b/kernel/kthread.c @@ -52,6 +52,7 @@ struct kthread_create_info struct kthread { unsigned long flags; unsigned int cpu; + int result; int (*threadfn)(void *); void *data; mm_segment_t oldfs; @@ -287,7 +288,9 @@ EXPORT_SYMBOL_GPL(kthread_parkme); */ void __noreturn kthread_exit(long result) { - do_exit(result); + struct kthread *kthread = to_kthread(current); + kthread->result = result; + do_exit(0); } /** @@ -679,7 +682,7 @@ int kthread_stop(struct task_struct *k) kthread_unpark(k); wake_up_process(k); wait_for_completion(&kthread->exited); - ret = k->exit_code; + ret = kthread->result; put_task_struct(k); trace_sched_kthread_stop_ret(ret); -- cgit v1.2.3 From 5eb6f22823e023ee13391978c329c4bd18f82552 Mon Sep 17 00:00:00 2001 From: "Eric W. Biederman" Date: Tue, 14 Dec 2021 11:25:01 -0600 Subject: exit/kthread: Fix the kerneldoc comment for kthread_complete_and_exit I misspelled kthread_complete_and_exit in the kernel doc comment fix it. Link: https://lkml.kernel.org/r/202112141329.KBkyJ5ql-lkp@intel.com Link: https://lkml.kernel.org/r/202112141422.Cykr6YUS-lkp@intel.com Reported-by: kernel test robot Fixes: cead18552660 ("exit: Rename complete_and_exit to kthread_complete_and_exit") Signed-off-by: "Eric W. Biederman" --- kernel/kthread.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'kernel/kthread.c') diff --git a/kernel/kthread.c b/kernel/kthread.c index 9c6c532047c4..c14707d15341 100644 --- a/kernel/kthread.c +++ b/kernel/kthread.c @@ -294,7 +294,7 @@ void __noreturn kthread_exit(long result) } /** - * kthread_complete_and exit - Exit the current kthread. + * kthread_complete_and_exit - Exit the current kthread. * @comp: Completion to complete * @code: The integer value to return to kthread_stop(). * -- cgit v1.2.3 From e32cf5dfbe227b355776948b2c9b5691b84d1cbd Mon Sep 17 00:00:00 2001 From: "Eric W. Biederman" Date: Wed, 22 Dec 2021 22:10:09 -0600 Subject: kthread: Generalize pf_io_worker so it can point to struct kthread The point of using set_child_tid to hold the kthread pointer was that it already did what is necessary. There are now restrictions on when set_child_tid can be initialized and when set_child_tid can be used in schedule_tail. Which indicates that continuing to use set_child_tid to hold the kthread pointer is a bad idea. Instead of continuing to use the set_child_tid field of task_struct generalize the pf_io_worker field of task_struct and use it to hold the kthread pointer. Rename pf_io_worker (which is a void * pointer) to worker_private so it can be used to store kthreads struct kthread pointer. Update the kthread code to store the kthread pointer in the worker_private field. Remove the places where set_child_tid had to be dealt with carefully because kthreads also used it. Link: https://lkml.kernel.org/r/CAHk-=wgtFAA9SbVYg0gR1tqPMC17-NYcs0GQkaYg1bGhh1uJQQ@mail.gmail.com Link: https://lkml.kernel.org/r/87a6grvqy8.fsf_-_@email.froward.int.ebiederm.org Suggested-by: Linus Torvalds Signed-off-by: "Eric W. 
Biederman" --- fs/io-wq.c | 6 +++--- fs/io-wq.h | 2 +- include/linux/sched.h | 4 ++-- kernel/fork.c | 8 +------- kernel/kthread.c | 14 +++++--------- kernel/sched/core.c | 2 +- 6 files changed, 13 insertions(+), 23 deletions(-) (limited to 'kernel/kthread.c') diff --git a/fs/io-wq.c b/fs/io-wq.c index 88202de519f6..e4fc7384b40c 100644 --- a/fs/io-wq.c +++ b/fs/io-wq.c @@ -657,7 +657,7 @@ loop: */ void io_wq_worker_running(struct task_struct *tsk) { - struct io_worker *worker = tsk->pf_io_worker; + struct io_worker *worker = tsk->worker_private; if (!worker) return; @@ -675,7 +675,7 @@ void io_wq_worker_running(struct task_struct *tsk) */ void io_wq_worker_sleeping(struct task_struct *tsk) { - struct io_worker *worker = tsk->pf_io_worker; + struct io_worker *worker = tsk->worker_private; if (!worker) return; @@ -694,7 +694,7 @@ void io_wq_worker_sleeping(struct task_struct *tsk) static void io_init_new_worker(struct io_wqe *wqe, struct io_worker *worker, struct task_struct *tsk) { - tsk->pf_io_worker = worker; + tsk->worker_private = worker; worker->task = tsk; set_cpus_allowed_ptr(tsk, wqe->cpu_mask); tsk->flags |= PF_NO_SETAFFINITY; diff --git a/fs/io-wq.h b/fs/io-wq.h index 41bf37674a49..c7c23947cbcd 100644 --- a/fs/io-wq.h +++ b/fs/io-wq.h @@ -200,6 +200,6 @@ static inline void io_wq_worker_running(struct task_struct *tsk) static inline bool io_wq_current_is_worker(void) { return in_task() && (current->flags & PF_IO_WORKER) && - current->pf_io_worker; + current->worker_private; } #endif diff --git a/include/linux/sched.h b/include/linux/sched.h index 78c351e35fec..52f2fdffa3ab 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -987,8 +987,8 @@ struct task_struct { /* CLONE_CHILD_CLEARTID: */ int __user *clear_child_tid; - /* PF_IO_WORKER */ - void *pf_io_worker; + /* PF_KTHREAD | PF_IO_WORKER */ + void *worker_private; u64 utime; u64 stime; diff --git a/kernel/fork.c b/kernel/fork.c index 0816be1bb044..6f0293cb29c9 100644 --- a/kernel/fork.c +++ b/kernel/fork.c @@ -950,7 +950,7 @@ static struct task_struct *dup_task_struct(struct task_struct *orig, int node) tsk->splice_pipe = NULL; tsk->task_frag.page = NULL; tsk->wake_q.next = NULL; - tsk->pf_io_worker = NULL; + tsk->worker_private = NULL; account_kernel_stack(tsk, 1); @@ -2032,12 +2032,6 @@ static __latent_entropy struct task_struct *copy_process( siginitsetinv(&p->blocked, sigmask(SIGKILL)|sigmask(SIGSTOP)); } - /* - * This _must_ happen before we call free_task(), i.e. before we jump - * to any of the bad_fork_* labels. This is to avoid freeing - * p->set_child_tid which is (ab)used as a kthread's data pointer for - * kernel threads (PF_KTHREAD). - */ p->set_child_tid = (clone_flags & CLONE_CHILD_SETTID) ? args->child_tid : NULL; /* * Clear TID on mm_release()? diff --git a/kernel/kthread.c b/kernel/kthread.c index c14707d15341..261a3c3b9c6c 100644 --- a/kernel/kthread.c +++ b/kernel/kthread.c @@ -72,7 +72,7 @@ enum KTHREAD_BITS { static inline struct kthread *to_kthread(struct task_struct *k) { WARN_ON(!(k->flags & PF_KTHREAD)); - return (__force void *)k->set_child_tid; + return k->worker_private; } /* @@ -80,7 +80,7 @@ static inline struct kthread *to_kthread(struct task_struct *k) * * Per construction; when: * - * (p->flags & PF_KTHREAD) && p->set_child_tid + * (p->flags & PF_KTHREAD) && p->worker_private * * the task is both a kthread and struct kthread is persistent. 
However * PF_KTHREAD on it's own is not, kernel_thread() can exec() (See umh.c and @@ -88,7 +88,7 @@ static inline struct kthread *to_kthread(struct task_struct *k) */ static inline struct kthread *__to_kthread(struct task_struct *p) { - void *kthread = (__force void *)p->set_child_tid; + void *kthread = p->worker_private; if (kthread && !(p->flags & PF_KTHREAD)) kthread = NULL; return kthread; @@ -109,11 +109,7 @@ bool set_kthread_struct(struct task_struct *p) init_completion(&kthread->parked); p->vfork_done = &kthread->exited; - /* - * We abuse ->set_child_tid to avoid the new member and because it - * can't be wrongly copied by copy_process(). - */ - p->set_child_tid = (__force void __user *)kthread; + p->worker_private = kthread; return true; } @@ -128,7 +124,7 @@ void free_kthread_struct(struct task_struct *k) #ifdef CONFIG_BLK_CGROUP WARN_ON_ONCE(kthread && kthread->blkcg_css); #endif - k->set_child_tid = (__force void __user *)NULL; + k->worker_private = NULL; kfree(kthread); } diff --git a/kernel/sched/core.c b/kernel/sched/core.c index d8adbea77be1..ee222b89c692 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c @@ -4908,7 +4908,7 @@ asmlinkage __visible void schedule_tail(struct task_struct *prev) finish_task_switch(prev); preempt_enable(); - if (!(current->flags & PF_KTHREAD) && current->set_child_tid) + if (current->set_child_tid) put_user(task_pid_vnr(current), current->set_child_tid); calculate_sigpending(); -- cgit v1.2.3
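To make the end state of the series above concrete, here is a minimal sketch of how a kernel thread reports an exit status once these patches are applied: the value passed to kthread_exit() (or returned from the threadfn) is stored in struct kthread::result and handed back by kthread_stop(), instead of travelling through task_struct::exit_code. The module, thread name, and threadfn below are hypothetical illustrations, not code from the patches; only the kthread_* APIs they call are taken from the series and the existing kthread interface.

/*
 * Illustrative sketch only (hypothetical "kthread_exit_demo" module):
 * a threadfn that ends with kthread_exit(), and a stopper that reads
 * the value back via kthread_stop().
 */
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/kthread.h>
#include <linux/module.h>

static struct task_struct *demo_task;

static int demo_threadfn(void *data)
{
	long polls = 0;

	while (!kthread_should_stop()) {
		/* Real work would go here. */
		msleep(100);
		polls++;
	}
	/*
	 * kthread_exit(x) and "return x" are equivalent for a kthread:
	 * either way the value lands in struct kthread::result and is
	 * returned by kthread_stop() below.
	 */
	kthread_exit(polls);
}

static int __init demo_init(void)
{
	demo_task = kthread_run(demo_threadfn, NULL, "kthread_exit_demo");
	return PTR_ERR_OR_ZERO(demo_task);
}

static void __exit demo_exit(void)
{
	int ret = kthread_stop(demo_task);	/* struct kthread::result */

	pr_info("demo thread reported %d\n", ret);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");

A kernel thread whose module may be unloaded while the thread is still exiting would instead finish with kthread_complete_and_exit(&completion, status) or module_put_and_kthread_exit(), as the drivers converted above do, so the module's exit path can wait on the completion before the thread's code is freed.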