| author | Linus Torvalds <torvalds@linux-foundation.org> | 2026-03-26 12:03:37 -0700 |
|---|---|---|
| committer | Linus Torvalds <torvalds@linux-foundation.org> | 2026-03-26 12:03:37 -0700 |
| commit | 25b69ebe28c8e3f883b071e924b87d358db56047 | |
| tree | a93e219ce73182ea51727f25e97df8b336d22b0b /security | |
| parent | 453a4a5f97f0c95b7df458e6afb98d4ab057d90b | |
| parent | a23811061a553c70c42de0e811b2ec15b2d54157 | |
Merge tag 'landlock-7.0-rc6' of git://git.kernel.org/pub/scm/linux/kernel/git/mic/linux
Pull Landlock fixes from Mickaël Salaün:
"This mainly fixes Landlock TSYNC issues related to interrupts and
unexpected task exit.
Other fixes touch documentation and samples, and a new test extends
coverage"
* tag 'landlock-7.0-rc6' of git://git.kernel.org/pub/scm/linux/kernel/git/mic/linux:
landlock: Expand restrict flags example for ABI version 8
selftests/landlock: Test tsync interruption and cancellation paths
landlock: Clean up interrupted thread logic in TSYNC
landlock: Serialize TSYNC thread restriction
samples/landlock: Bump ABI version to 8
landlock: Improve TSYNC types
landlock: Fully release unused TSYNC work entries
landlock: Fix formatting
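For context, the TSYNC entries in this tag are about applying a Landlock ruleset to every sibling thread at once rather than only the calling thread. The sketch below shows how a multi-threaded program might request that. It is a rough illustration only: the LANDLOCK_RESTRICT_SELF_TSYNC name and placeholder value are assumptions, not taken from this merge (the real constant would come from the ABI version 8 UAPI header referenced by the "Expand restrict flags example" patch).

```c
#define _GNU_SOURCE
#include <linux/landlock.h>
#include <sys/prctl.h>
#include <sys/syscall.h>
#include <unistd.h>

/* Assumed flag name and value, for illustration only. */
#ifndef LANDLOCK_RESTRICT_SELF_TSYNC
#define LANDLOCK_RESTRICT_SELF_TSYNC (1U << 30)
#endif

static int restrict_all_threads(void)
{
	const struct landlock_ruleset_attr attr = {
		.handled_access_fs = LANDLOCK_ACCESS_FS_EXECUTE,
	};
	int ruleset_fd;

	ruleset_fd = syscall(SYS_landlock_create_ruleset, &attr, sizeof(attr), 0);
	if (ruleset_fd < 0)
		return -1;

	/* Required before landlock_restrict_self() without CAP_SYS_ADMIN. */
	if (prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0)) {
		close(ruleset_fd);
		return -1;
	}

	/*
	 * With the assumed TSYNC flag, the kernel would extend the
	 * restriction to all sibling threads, not just the caller.
	 */
	if (syscall(SYS_landlock_restrict_self, ruleset_fd,
		    LANDLOCK_RESTRICT_SELF_TSYNC)) {
		close(ruleset_fd);
		return -1;
	}

	close(ruleset_fd);
	return 0;
}
```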
Diffstat (limited to 'security')
| -rw-r--r-- | security/landlock/domain.c | 3 |
| -rw-r--r-- | security/landlock/ruleset.c | 9 |
| -rw-r--r-- | security/landlock/tsync.c | 92 |
3 files changed, 78 insertions, 26 deletions
diff --git a/security/landlock/domain.c b/security/landlock/domain.c
index f5b78d4766cd..f0d83f43afa1 100644
--- a/security/landlock/domain.c
+++ b/security/landlock/domain.c
@@ -94,8 +94,7 @@ static struct landlock_details *get_current_details(void)
 	 * allocate with GFP_KERNEL_ACCOUNT because it is independent from the
 	 * caller.
 	 */
-	details =
-		kzalloc_flex(*details, exe_path, path_size);
+	details = kzalloc_flex(*details, exe_path, path_size);
 	if (!details)
 		return ERR_PTR(-ENOMEM);
 
diff --git a/security/landlock/ruleset.c b/security/landlock/ruleset.c
index 319873586385..73018dc8d6c7 100644
--- a/security/landlock/ruleset.c
+++ b/security/landlock/ruleset.c
@@ -32,9 +32,8 @@ static struct landlock_ruleset *create_ruleset(const u32 num_layers)
 {
 	struct landlock_ruleset *new_ruleset;
 
-	new_ruleset =
-		kzalloc_flex(*new_ruleset, access_masks, num_layers,
-			     GFP_KERNEL_ACCOUNT);
+	new_ruleset = kzalloc_flex(*new_ruleset, access_masks, num_layers,
+				   GFP_KERNEL_ACCOUNT);
 	if (!new_ruleset)
 		return ERR_PTR(-ENOMEM);
 	refcount_set(&new_ruleset->usage, 1);
@@ -559,8 +558,8 @@ landlock_merge_ruleset(struct landlock_ruleset *const parent,
 	if (IS_ERR(new_dom))
 		return new_dom;
 
-	new_dom->hierarchy = kzalloc_obj(*new_dom->hierarchy,
-					 GFP_KERNEL_ACCOUNT);
+	new_dom->hierarchy =
+		kzalloc_obj(*new_dom->hierarchy, GFP_KERNEL_ACCOUNT);
 	if (!new_dom->hierarchy)
 		return ERR_PTR(-ENOMEM);
 
diff --git a/security/landlock/tsync.c b/security/landlock/tsync.c
index de01aa899751..4d4427ba8d93 100644
--- a/security/landlock/tsync.c
+++ b/security/landlock/tsync.c
@@ -203,6 +203,40 @@ static struct tsync_work *tsync_works_provide(struct tsync_works *s,
 	return ctx;
 }
 
+/**
+ * tsync_works_trim - Put the last tsync_work element
+ *
+ * @s: TSYNC works to trim.
+ *
+ * Put the last task and decrement the size of @s.
+ *
+ * This helper does not cancel a running task, but just reset the last element
+ * to zero.
+ */
+static void tsync_works_trim(struct tsync_works *s)
+{
+	struct tsync_work *ctx;
+
+	if (WARN_ON_ONCE(s->size <= 0))
+		return;
+
+	ctx = s->works[s->size - 1];
+
+	/*
+	 * For consistency, remove the task from ctx so that it does not look like
+	 * we handed it a task_work.
+	 */
+	put_task_struct(ctx->task);
+	*ctx = (typeof(*ctx)){};
+
+	/*
+	 * Cancel the tsync_works_provide() change to recycle the reserved memory
+	 * for the next thread, if any. This also ensures that cancel_tsync_works()
+	 * and tsync_works_release() do not see any NULL task pointers.
+	 */
+	s->size--;
+}
+
 /*
  * tsync_works_grow_by - preallocates space for n more contexts in s
  *
@@ -256,13 +290,14 @@ static int tsync_works_grow_by(struct tsync_works *s, size_t n, gfp_t flags)
  * tsync_works_contains - checks for presence of task in s
  */
 static bool tsync_works_contains_task(const struct tsync_works *s,
-				      struct task_struct *task)
+				      const struct task_struct *task)
 {
 	size_t i;
 
 	for (i = 0; i < s->size; i++)
 		if (s->works[i]->task == task)
 			return true;
+
 	return false;
 }
 
@@ -276,7 +311,7 @@ static void tsync_works_release(struct tsync_works *s)
 	size_t i;
 
 	for (i = 0; i < s->size; i++) {
-		if (!s->works[i]->task)
+		if (WARN_ON_ONCE(!s->works[i]->task))
 			continue;
 
 		put_task_struct(s->works[i]->task);
@@ -284,6 +319,7 @@ static void tsync_works_release(struct tsync_works *s)
 	for (i = 0; i < s->capacity; i++)
 		kfree(s->works[i]);
+
 	kfree(s->works);
 	s->works = NULL;
 	s->size = 0;
@@ -295,7 +331,7 @@ static void tsync_works_release(struct tsync_works *s)
  */
 static size_t count_additional_threads(const struct tsync_works *works)
 {
-	struct task_struct *thread, *caller;
+	const struct task_struct *caller, *thread;
 	size_t n = 0;
 
 	caller = current;
@@ -334,7 +370,8 @@ static bool schedule_task_work(struct tsync_works *works,
 			       struct tsync_shared_context *shared_ctx)
 {
 	int err;
-	struct task_struct *thread, *caller;
+	const struct task_struct *caller;
+	struct task_struct *thread;
 	struct tsync_work *ctx;
 	bool found_more_threads = false;
 
@@ -379,16 +416,14 @@ static bool schedule_task_work(struct tsync_works *works,
 		init_task_work(&ctx->work, restrict_one_thread_callback);
 		err = task_work_add(thread, &ctx->work, TWA_SIGNAL);
-		if (err) {
+		if (unlikely(err)) {
 			/*
 			 * task_work_add() only fails if the task is about to exit. We
 			 * checked that earlier, but it can happen as a race. Resume
 			 * without setting an error, as the task is probably gone in the
-			 * next loop iteration. For consistency, remove the task from ctx
-			 * so that it does not look like we handed it a task_work.
+			 * next loop iteration.
 			 */
-			put_task_struct(ctx->task);
-			ctx->task = NULL;
+			tsync_works_trim(works);
 
 			atomic_dec(&shared_ctx->num_preparing);
 			atomic_dec(&shared_ctx->num_unfinished);
@@ -406,12 +441,15 @@ static bool schedule_task_work(struct tsync_works *works,
  * shared_ctx->num_preparing and shared_ctx->num_unfished and mark the two
  * completions if needed, as if the task was never scheduled.
  */
-static void cancel_tsync_works(struct tsync_works *works,
+static void cancel_tsync_works(const struct tsync_works *works,
 			       struct tsync_shared_context *shared_ctx)
 {
-	int i;
+	size_t i;
 
 	for (i = 0; i < works->size; i++) {
+		if (WARN_ON_ONCE(!works->works[i]->task))
+			continue;
+
 		if (!task_work_cancel(works->works[i]->task,
 				      &works->works[i]->work))
 			continue;
@@ -448,6 +486,16 @@ int landlock_restrict_sibling_threads(const struct cred *old_cred,
 	shared_ctx.set_no_new_privs = task_no_new_privs(current);
 
 	/*
+	 * Serialize concurrent TSYNC operations to prevent deadlocks when
+	 * multiple threads call landlock_restrict_self() simultaneously.
+	 * If the lock is already held, we gracefully yield by restarting the
+	 * syscall. This allows the current thread to process pending
+	 * task_works before retrying.
+	 */
+	if (!down_write_trylock(&current->signal->exec_update_lock))
+		return restart_syscall();
+
+	/*
 	 * We schedule a pseudo-signal task_work for each of the calling task's
 	 * sibling threads.  In the task work, each thread:
 	 *
@@ -527,24 +575,30 @@ int landlock_restrict_sibling_threads(const struct cred *old_cred,
 					       -ERESTARTNOINTR);
 
 				/*
-				 * Cancel task works for tasks that did not start running yet,
-				 * and decrement all_prepared and num_unfinished accordingly.
+				 * Opportunistic improvement: try to cancel task
+				 * works for tasks that did not start running
+				 * yet. We do not have a guarantee that it
+				 * cancels any of the enqueued task works
+				 * because task_work_run() might already have
+				 * dequeued them.
 				 */
 				cancel_tsync_works(&works, &shared_ctx);
 
 				/*
-				 * The remaining task works have started running, so waiting for
-				 * their completion will finish.
+				 * Break the loop with error. The cleanup code
+				 * after the loop unblocks the remaining
+				 * task_works.
 				 */
-				wait_for_completion(&shared_ctx.all_prepared);
+				break;
 			}
 		}
 	} while (found_more_threads &&
 		 !atomic_read(&shared_ctx.preparation_error));
 
 	/*
-	 * We now have all sibling threads blocking and in "prepared" state in the
-	 * task work. Ask all threads to commit.
+	 * We now have either (a) all sibling threads blocking and in "prepared"
+	 * state in the task work, or (b) the preparation error is set. Ask all
+	 * threads to commit (or abort).
 	 */
 	complete_all(&shared_ctx.ready_to_commit);
 
@@ -556,6 +610,6 @@ int landlock_restrict_sibling_threads(const struct cred *old_cred,
 	wait_for_completion(&shared_ctx.all_finished);
 
 	tsync_works_release(&works);
-
+	up_write(&current->signal->exec_update_lock);
 	return atomic_read(&shared_ctx.preparation_error);
 }
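The serialization fix above relies on one rule: a caller that cannot take the lock must not block, because it may itself need to run pending task_works before the other restriction can finish. Below is a rough userspace analogue of that "trylock or back off and retry" pattern; the helper names are illustrative stand-ins, not the kernel implementation.

```c
#include <pthread.h>
#include <sched.h>

static pthread_mutex_t tsync_lock = PTHREAD_MUTEX_INITIALIZER;

/* Stand-in for the work a contending caller must keep servicing
 * (the kernel equivalent would be pending task_works). */
static void run_pending_work(void)
{
	sched_yield();
}

/* Stand-in for the single-owner restriction protocol. */
static int restrict_sibling_threads_locked(void)
{
	return 0;
}

static int restrict_sibling_threads(void)
{
	int err;

	/* Like down_write_trylock(): never sleep while waiting for the lock. */
	while (pthread_mutex_trylock(&tsync_lock) != 0) {
		/* Like restart_syscall(): drain pending work, then retry. */
		run_pending_work();
	}

	err = restrict_sibling_threads_locked();
	pthread_mutex_unlock(&tsync_lock);
	return err;
}
```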
