| author | Thomas Gleixner <tglx@linutronix.de> | 2022-02-21 11:33:57 +0100 |
|---|---|---|
| committer | Thomas Gleixner <tglx@linutronix.de> | 2022-02-21 11:33:57 +0100 |
| commit | d2206fcabdfaff3958ab67cc5b8f63257e57b889 (patch) | |
| tree | f6bc697bb2d10e49e5123b4e0cc30c11aafc3120 /kernel/fork.c | |
| parent | fe13889c390e14205e064d7e159e61eb5da4b1c3 (diff) | |
| parent | 509853f9e1e7b1490dc79f735a5dbafc9298f40d (diff) | |
Merge tag 'irq-api-2022-02-21' into irq/core
Merge the generic_handle_irq_safe() API back into irq/core.
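For context, generic_handle_irq_safe() lets a demultiplexing handler inject an interrupt from any context: it performs the local_irq_save()/restore() that plain generic_handle_irq() expects its caller to have already done. A minimal sketch of the intended usage, assuming a hypothetical driver with an irq_domain (struct demux_chip and demux_handle_child() are illustrative names, not from this merge):

```c
#include <linux/irq.h>
#include <linux/irqdomain.h>

/* Hypothetical demux driver state; illustrative only. */
struct demux_chip {
	struct irq_domain *domain;
};

/*
 * Called from both hardirq context and a threaded completion path.
 * generic_handle_irq() is only safe with local interrupts disabled;
 * generic_handle_irq_safe() disables them around the handler itself,
 * so the same code path works from any context.
 */
static void demux_handle_child(struct demux_chip *chip, irq_hw_number_t hwirq)
{
	unsigned int virq = irq_find_mapping(chip->domain, hwirq);

	if (virq)
		generic_handle_irq_safe(virq);
}
```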
Diffstat (limited to 'kernel/fork.c')
| -rw-r--r-- | kernel/fork.c | 30 |
|---|---|---|

1 file changed, 20 insertions(+), 10 deletions(-)
```diff
diff --git a/kernel/fork.c b/kernel/fork.c
index d75a528f7b21..a024bf6254df 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -2021,18 +2021,18 @@ static __latent_entropy struct task_struct *copy_process(
 #ifdef CONFIG_PROVE_LOCKING
 	DEBUG_LOCKS_WARN_ON(!p->softirqs_enabled);
 #endif
+	retval = copy_creds(p, clone_flags);
+	if (retval < 0)
+		goto bad_fork_free;
+
 	retval = -EAGAIN;
 	if (is_ucounts_overlimit(task_ucounts(p), UCOUNT_RLIMIT_NPROC, rlimit(RLIMIT_NPROC))) {
 		if (p->real_cred->user != INIT_USER &&
 		    !capable(CAP_SYS_RESOURCE) && !capable(CAP_SYS_ADMIN))
-			goto bad_fork_free;
+			goto bad_fork_cleanup_count;
 	}
 	current->flags &= ~PF_NPROC_EXCEEDED;
 
-	retval = copy_creds(p, clone_flags);
-	if (retval < 0)
-		goto bad_fork_free;
-
 	/*
 	 * If multiple threads are within copy_process(), then this check
 	 * triggers too late. This doesn't hurt, the check is only there
@@ -2267,6 +2267,17 @@ static __latent_entropy struct task_struct *copy_process(
 		goto bad_fork_put_pidfd;
 
 	/*
+	 * Now that the cgroups are pinned, re-clone the parent cgroup and put
+	 * the new task on the correct runqueue. All this *before* the task
+	 * becomes visible.
+	 *
+	 * This isn't part of ->can_fork() because while the re-cloning is
+	 * cgroup specific, it unconditionally needs to place the task on a
+	 * runqueue.
+	 */
+	sched_cgroup_fork(p, args);
+
+	/*
 	 * From this point on we must avoid any synchronous user-space
 	 * communication until we take the tasklist-lock. In particular, we do
 	 * not want user-space to be able to predict the process start-time by
@@ -2323,10 +2334,6 @@ static __latent_entropy struct task_struct *copy_process(
 		goto bad_fork_cancel_cgroup;
 	}
 
-	/* past the last point of failure */
-	if (pidfile)
-		fd_install(pidfd, pidfile);
-
 	init_task_pid_links(p);
 	if (likely(p->pid)) {
 		ptrace_init_task(p, (clone_flags & CLONE_PTRACE) || trace);
@@ -2375,8 +2382,11 @@ static __latent_entropy struct task_struct *copy_process(
 	syscall_tracepoint_update(p);
 	write_unlock_irq(&tasklist_lock);
 
+	if (pidfile)
+		fd_install(pidfd, pidfile);
+
 	proc_fork_connector(p);
-	sched_post_fork(p, args);
+	sched_post_fork(p);
 	cgroup_post_fork(p, args);
 	perf_event_fork(p);
```
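The pidfd hunks follow the kernel's usual reserve-then-install discipline: fd_install() publishes the descriptor to userspace and cannot be rolled back, so the diff moves it past the last failure path, after tasklist_lock is dropped. A minimal kernel-style sketch of that pattern, with hypothetical names (example_fops and example_setup() are illustrative, not from this commit):

```c
#include <linux/anon_inodes.h>
#include <linux/err.h>
#include <linux/fcntl.h>
#include <linux/file.h>
#include <linux/fs.h>

/* Hypothetical fops and fallible setup step; illustrative only. */
static const struct file_operations example_fops;
static int example_setup(void *priv);

static int publish_example_fd(void *priv)
{
	struct file *file;
	int fd, err;

	/* Reserve a descriptor number; userspace cannot see it yet. */
	fd = get_unused_fd_flags(O_CLOEXEC);
	if (fd < 0)
		return fd;

	file = anon_inode_getfile("[example]", &example_fops, priv, O_RDWR);
	if (IS_ERR(file)) {
		put_unused_fd(fd);	/* still private: safe to undo */
		return PTR_ERR(file);
	}

	err = example_setup(priv);
	if (err) {
		fput(file);
		put_unused_fd(fd);
		return err;
	}

	/*
	 * Past the last point of failure: fd_install() makes the file
	 * visible in the caller's fd table and cannot be undone.
	 */
	fd_install(fd, file);
	return fd;
}
```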
