| author    | Chen Ridong <chenridong@huawei.com>              | 2025-01-10 06:16:39 +0000 |
|-----------|--------------------------------------------------|---------------------------|
| committer | Greg Kroah-Hartman <gregkh@linuxfoundation.org>  | 2025-02-08 09:57:35 +0100 |
| commit    | 8ca38d0ca8c3d30dd18d311f1a7ec5cb56972cac (patch) |                           |
| tree      | fba51d263d3b7d894a9f29ff08adaf13c15475ca /kernel |                           |
| parent    | c629808304e8c72caaaec397b3061813abe1265b (diff)  |                           |
padata: avoid UAF for reorder_work
[ Upstream commit dd7d37ccf6b11f3d95e797ebe4e9e886d0332600 ]
Although the previous patch can avoid the ps and pd UAF for _do_serial, it
cannot avoid a potential UAF issue for reorder_work. That issue can
happen as shown below:
crypto_request                  crypto_request          crypto_del_alg
padata_do_serial
  ...
  padata_reorder
  // processes all remaining
  // requests then breaks
  while (1) {
          if (!padata)
                  break;
          ...
  }

                                padata_do_serial
                                // new request added
                                list_add
                                // sees the new request
                                queue_work(reorder_work)
                                  padata_reorder
                                    queue_work_on(squeue->work)
...

<kworker context>
padata_serial_worker
// completes new request,
// no more outstanding
// requests
                                                        crypto_del_alg
                                                        // free pd

<kworker context>
invoke_padata_reorder
// UAF of pd
To avoid the UAF in 'reorder_work', take a 'pd' reference before queueing
'reorder_work' on the 'serial_wq', and drop the 'pd' reference after the
queued work finishes.
Fixes: bbefa1dd6a6d ("crypto: pcrypt - Avoid deadlock by using per-instance padata queues")
Signed-off-by: Chen Ridong <chenridong@huawei.com>
Acked-by: Daniel Jordan <daniel.m.jordan@oracle.com>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
Signed-off-by: Sasha Levin <sashal@kernel.org>
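For context, the diff below calls the pd get/put helpers introduced by an earlier patch in this series; they are not part of this commit. A rough sketch of what they look like, with the exact refcount plumbing approximated rather than copied from the tree:

static void padata_get_pd(struct parallel_data *pd)
{
        /* Pin pd so a concurrent free path cannot release it. */
        refcount_inc(&pd->refcnt);
}

static void padata_put_pd(struct parallel_data *pd)
{
        /* Drop the pin; the last reference frees pd. */
        if (refcount_dec_and_test(&pd->refcnt))
                padata_free_pd(pd);
}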
Diffstat (limited to 'kernel')
-rw-r--r-- | kernel/padata.c | 10 |
1 file changed, 9 insertions(+), 1 deletion(-)
diff --git a/kernel/padata.c b/kernel/padata.c
index a4c7a6eefe10..22770372bdf3 100644
--- a/kernel/padata.c
+++ b/kernel/padata.c
@@ -352,8 +352,14 @@ static void padata_reorder(struct parallel_data *pd)
 	smp_mb();
 
 	reorder = per_cpu_ptr(pd->reorder_list, pd->cpu);
-	if (!list_empty(&reorder->list) && padata_find_next(pd, false))
+	if (!list_empty(&reorder->list) && padata_find_next(pd, false)) {
+		/*
+		 * Other context(eg. the padata_serial_worker) can finish the request.
+		 * To avoid UAF issue, add pd ref here, and put pd ref after reorder_work finish.
+		 */
+		padata_get_pd(pd);
 		queue_work(pinst->serial_wq, &pd->reorder_work);
+	}
 }
 
 static void invoke_padata_reorder(struct work_struct *work)
@@ -364,6 +370,8 @@ static void invoke_padata_reorder(struct work_struct *work)
 	pd = container_of(work, struct parallel_data, reorder_work);
 	padata_reorder(pd);
 	local_bh_enable();
+	/* Pairs with putting the reorder_work in the serial_wq */
+	padata_put_pd(pd);
 }
 
 static void padata_serial_worker(struct work_struct *serial_work)
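Not part of the commit: a minimal userspace analog of the same pattern, showing why the extra reference closes the race. The names (struct pd, pd_get, pd_put, reorder_worker) are made up for illustration; the point is that the submitter pins the object before queueing asynchronous work and the worker drops the pin when done, so whichever side runs last performs the free.

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct pd {
        atomic_int refcnt;
};

static void pd_get(struct pd *pd)
{
        atomic_fetch_add(&pd->refcnt, 1);
}

static void pd_put(struct pd *pd)
{
        /* atomic_fetch_sub returns the old value: 1 means last reference. */
        if (atomic_fetch_sub(&pd->refcnt, 1) == 1) {
                printf("last reference dropped, freeing pd\n");
                free(pd);
        }
}

/* Stand-in for invoke_padata_reorder() running in a kworker. */
static void *reorder_worker(void *arg)
{
        struct pd *pd = arg;

        printf("worker: reorder using pd %p\n", (void *)pd);
        pd_put(pd);     /* pairs with the pd_get() taken before queueing */
        return NULL;
}

int main(void)
{
        struct pd *pd = malloc(sizeof(*pd));
        pthread_t worker;

        atomic_init(&pd->refcnt, 1);            /* owner's reference */

        pd_get(pd);                             /* pin pd for the async work */
        pthread_create(&worker, NULL, reorder_worker, pd);

        pd_put(pd);     /* owner tears down, like crypto_del_alg freeing pd */

        pthread_join(worker, NULL);             /* pd stayed valid for the worker */
        return 0;
}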