summaryrefslogtreecommitdiff
path: root/security/landlock/tsync.c
diff options
context:
space:
mode:
Diffstat (limited to 'security/landlock/tsync.c')
-rw-r--r--security/landlock/tsync.c92
1 files changed, 73 insertions, 19 deletions
diff --git a/security/landlock/tsync.c b/security/landlock/tsync.c
index de01aa899751..4d4427ba8d93 100644
--- a/security/landlock/tsync.c
+++ b/security/landlock/tsync.c
@@ -203,6 +203,40 @@ static struct tsync_work *tsync_works_provide(struct tsync_works *s,
return ctx;
}
+/**
+ * tsync_works_trim - Put the last tsync_work element
+ *
+ * @s: TSYNC works to trim.
+ *
+ * Put the last task and decrement the size of @s.
+ *
+ * This helper does not cancel a running task, but just resets the last element
+ * to zero.
+ */
+static void tsync_works_trim(struct tsync_works *s)
+{
+ struct tsync_work *ctx;
+
+ if (WARN_ON_ONCE(s->size <= 0))
+ return;
+
+ ctx = s->works[s->size - 1];
+
+ /*
+ * For consistency, remove the task from ctx so that it does not look like
+ * we handed it a task_work.
+ */
+ put_task_struct(ctx->task);
+ *ctx = (typeof(*ctx)){};
+
+ /*
+ * Cancel the tsync_works_provide() change to recycle the reserved memory
+ * for the next thread, if any. This also ensures that cancel_tsync_works()
+ * and tsync_works_release() do not see any NULL task pointers.
+ */
+ s->size--;
+}
+
/*
* tsync_works_grow_by - preallocates space for n more contexts in s
*
@@ -256,13 +290,14 @@ static int tsync_works_grow_by(struct tsync_works *s, size_t n, gfp_t flags)
* tsync_works_contains - checks for presence of task in s
*/
static bool tsync_works_contains_task(const struct tsync_works *s,
- struct task_struct *task)
+ const struct task_struct *task)
{
size_t i;
for (i = 0; i < s->size; i++)
if (s->works[i]->task == task)
return true;
+
return false;
}
@@ -276,7 +311,7 @@ static void tsync_works_release(struct tsync_works *s)
size_t i;
for (i = 0; i < s->size; i++) {
- if (!s->works[i]->task)
+ if (WARN_ON_ONCE(!s->works[i]->task))
continue;
put_task_struct(s->works[i]->task);
@@ -284,6 +319,7 @@ static void tsync_works_release(struct tsync_works *s)
for (i = 0; i < s->capacity; i++)
kfree(s->works[i]);
+
kfree(s->works);
s->works = NULL;
s->size = 0;
@@ -295,7 +331,7 @@ static void tsync_works_release(struct tsync_works *s)
*/
static size_t count_additional_threads(const struct tsync_works *works)
{
- struct task_struct *thread, *caller;
+ const struct task_struct *caller, *thread;
size_t n = 0;
caller = current;
@@ -334,7 +370,8 @@ static bool schedule_task_work(struct tsync_works *works,
struct tsync_shared_context *shared_ctx)
{
int err;
- struct task_struct *thread, *caller;
+ const struct task_struct *caller;
+ struct task_struct *thread;
struct tsync_work *ctx;
bool found_more_threads = false;
@@ -379,16 +416,14 @@ static bool schedule_task_work(struct tsync_works *works,
init_task_work(&ctx->work, restrict_one_thread_callback);
err = task_work_add(thread, &ctx->work, TWA_SIGNAL);
- if (err) {
+ if (unlikely(err)) {
/*
* task_work_add() only fails if the task is about to exit. We
* checked that earlier, but it can happen as a race. Resume
* without setting an error, as the task is probably gone in the
- * next loop iteration. For consistency, remove the task from ctx
- * so that it does not look like we handed it a task_work.
+ * next loop iteration.
*/
- put_task_struct(ctx->task);
- ctx->task = NULL;
+ tsync_works_trim(works);
atomic_dec(&shared_ctx->num_preparing);
atomic_dec(&shared_ctx->num_unfinished);
@@ -406,12 +441,15 @@ static bool schedule_task_work(struct tsync_works *works,
 * shared_ctx->num_preparing and shared_ctx->num_unfinished and mark the two
* completions if needed, as if the task was never scheduled.
*/
-static void cancel_tsync_works(struct tsync_works *works,
+static void cancel_tsync_works(const struct tsync_works *works,
struct tsync_shared_context *shared_ctx)
{
- int i;
+ size_t i;
for (i = 0; i < works->size; i++) {
+ if (WARN_ON_ONCE(!works->works[i]->task))
+ continue;
+
if (!task_work_cancel(works->works[i]->task,
&works->works[i]->work))
continue;
@@ -448,6 +486,16 @@ int landlock_restrict_sibling_threads(const struct cred *old_cred,
shared_ctx.set_no_new_privs = task_no_new_privs(current);
/*
+ * Serialize concurrent TSYNC operations to prevent deadlocks when
+ * multiple threads call landlock_restrict_self() simultaneously.
+ * If the lock is already held, we gracefully yield by restarting the
+ * syscall. This allows the current thread to process pending
+ * task_works before retrying.
+ */
+ if (!down_write_trylock(&current->signal->exec_update_lock))
+ return restart_syscall();
+
+ /*
* We schedule a pseudo-signal task_work for each of the calling task's
* sibling threads. In the task work, each thread:
*
@@ -527,24 +575,30 @@ int landlock_restrict_sibling_threads(const struct cred *old_cred,
-ERESTARTNOINTR);
/*
- * Cancel task works for tasks that did not start running yet,
- * and decrement all_prepared and num_unfinished accordingly.
+ * Opportunistic improvement: try to cancel task
+ * works for tasks that did not start running
+ * yet. We do not have a guarantee that it
+ * cancels any of the enqueued task works
+ * because task_work_run() might already have
+ * dequeued them.
*/
cancel_tsync_works(&works, &shared_ctx);
/*
- * The remaining task works have started running, so waiting for
- * their completion will finish.
+ * Break the loop with error. The cleanup code
+ * after the loop unblocks the remaining
+ * task_works.
*/
- wait_for_completion(&shared_ctx.all_prepared);
+ break;
}
}
} while (found_more_threads &&
!atomic_read(&shared_ctx.preparation_error));
/*
- * We now have all sibling threads blocking and in "prepared" state in the
- * task work. Ask all threads to commit.
+ * We now have either (a) all sibling threads blocking and in "prepared"
+ * state in the task work, or (b) the preparation error is set. Ask all
+ * threads to commit (or abort).
*/
complete_all(&shared_ctx.ready_to_commit);
@@ -556,6 +610,6 @@ int landlock_restrict_sibling_threads(const struct cred *old_cred,
wait_for_completion(&shared_ctx.all_finished);
tsync_works_release(&works);
-
+ up_write(&current->signal->exec_update_lock);
return atomic_read(&shared_ctx.preparation_error);
}