summary | refs | log | tree | commit | diff
path: root/io_uring
diff options
context:
space:
mode:
authorLinus Torvalds <torvalds@linux-foundation.org>2026-02-21 11:02:58 -0800
committerLinus Torvalds <torvalds@linux-foundation.org>2026-02-21 11:02:58 -0800
commit8934827db5403eae57d4537114a9ff88b0a8460f (patch)
tree5167aa7e16b786b9135e19d508b234054fa6e8ce /io_uring
parentc7decec2f2d2ab0366567f9e30c0e1418cece43f (diff)
parent7a70c15bd1449f1eb30991772edce37b41e496fb (diff)
Merge tag 'kmalloc_obj-treewide-v7.0-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/kees/linux
Pull kmalloc_obj conversion from Kees Cook:

 "This does the tree-wide conversion to kmalloc_obj() and friends using
  coccinelle, with a subsequent small manual cleanup of whitespace
  alignment that coccinelle does not handle.

  This uncovered a clang bug in __builtin_counted_by_ref(), so the
  conversion is preceded by disabling that for current versions of
  clang. The imminent clang 22.1 release has the fix.

  I've done allmodconfig build tests for x86_64, arm64, i386, and arm.
  I did defconfig builds for alpha, m68k, mips, parisc, powerpc, riscv,
  s390, sparc, sh, arc, csky, xtensa, hexagon, and openrisc"

* tag 'kmalloc_obj-treewide-v7.0-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/kees/linux:
  kmalloc_obj: Clean up after treewide replacements
  treewide: Replace kmalloc with kmalloc_obj for non-scalar types
  compiler_types: Disable __builtin_counted_by_ref for Clang
Diffstat (limited to 'io_uring')
-rw-r--r--io_uring/bpf_filter.c9
-rw-r--r--io_uring/eventfd.c2
-rw-r--r--io_uring/futex.c4
-rw-r--r--io_uring/io-wq.c4
-rw-r--r--io_uring/io_uring.c8
-rw-r--r--io_uring/kbuf.c8
-rw-r--r--io_uring/memmap.c4
-rw-r--r--io_uring/mock_file.c4
-rw-r--r--io_uring/poll.c4
-rw-r--r--io_uring/register.c4
-rw-r--r--io_uring/rsrc.c11
-rw-r--r--io_uring/sqpoll.c2
-rw-r--r--io_uring/tctx.c8
-rw-r--r--io_uring/xattr.c4
-rw-r--r--io_uring/zcrx.c10
15 files changed, 42 insertions, 44 deletions
diff --git a/io_uring/bpf_filter.c b/io_uring/bpf_filter.c
index 28a23e92ee81..6a98750e38b0 100644
--- a/io_uring/bpf_filter.c
+++ b/io_uring/bpf_filter.c
@@ -152,13 +152,12 @@ static struct io_bpf_filters *io_new_bpf_filters(void)
{
struct io_bpf_filters *filters __free(kfree) = NULL;
- filters = kzalloc(sizeof(*filters), GFP_KERNEL_ACCOUNT);
+ filters = kzalloc_obj(*filters, GFP_KERNEL_ACCOUNT);
if (!filters)
return ERR_PTR(-ENOMEM);
- filters->filters = kcalloc(IORING_OP_LAST,
- sizeof(struct io_bpf_filter *),
- GFP_KERNEL_ACCOUNT);
+ filters->filters = kzalloc_objs(struct io_bpf_filter *, IORING_OP_LAST,
+ GFP_KERNEL_ACCOUNT);
if (!filters->filters)
return ERR_PTR(-ENOMEM);
@@ -402,7 +401,7 @@ int io_register_bpf_filter(struct io_restriction *res,
old_filters = res->bpf_filters;
}
- filter = kzalloc(sizeof(*filter), GFP_KERNEL_ACCOUNT);
+ filter = kzalloc_obj(*filter, GFP_KERNEL_ACCOUNT);
if (!filter) {
ret = -ENOMEM;
goto err;
diff --git a/io_uring/eventfd.c b/io_uring/eventfd.c
index 78f8ab7db104..0120ecd97321 100644
--- a/io_uring/eventfd.c
+++ b/io_uring/eventfd.c
@@ -127,7 +127,7 @@ int io_eventfd_register(struct io_ring_ctx *ctx, void __user *arg,
if (copy_from_user(&fd, fds, sizeof(*fds)))
return -EFAULT;
- ev_fd = kmalloc(sizeof(*ev_fd), GFP_KERNEL);
+ ev_fd = kmalloc_obj(*ev_fd, GFP_KERNEL);
if (!ev_fd)
return -ENOMEM;
diff --git a/io_uring/futex.c b/io_uring/futex.c
index 1dabcfd503b8..fd503c24b428 100644
--- a/io_uring/futex.c
+++ b/io_uring/futex.c
@@ -185,8 +185,8 @@ int io_futexv_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
if (!iof->futex_nr || iof->futex_nr > FUTEX_WAITV_MAX)
return -EINVAL;
- ifd = kzalloc(struct_size_t(struct io_futexv_data, futexv, iof->futex_nr),
- GFP_KERNEL_ACCOUNT);
+ ifd = kzalloc_flex(struct io_futexv_data, futexv, iof->futex_nr,
+ GFP_KERNEL_ACCOUNT);
if (!ifd)
return -ENOMEM;
diff --git a/io_uring/io-wq.c b/io_uring/io-wq.c
index 7ed04911f7b9..2d04ff565920 100644
--- a/io_uring/io-wq.c
+++ b/io_uring/io-wq.c
@@ -897,7 +897,7 @@ static bool create_io_worker(struct io_wq *wq, struct io_wq_acct *acct)
__set_current_state(TASK_RUNNING);
- worker = kzalloc(sizeof(*worker), GFP_KERNEL);
+ worker = kzalloc_obj(*worker, GFP_KERNEL);
if (!worker) {
fail:
atomic_dec(&acct->nr_running);
@@ -1255,7 +1255,7 @@ struct io_wq *io_wq_create(unsigned bounded, struct io_wq_data *data)
if (WARN_ON_ONCE(!bounded))
return ERR_PTR(-EINVAL);
- wq = kzalloc(sizeof(struct io_wq), GFP_KERNEL);
+ wq = kzalloc_obj(struct io_wq, GFP_KERNEL);
if (!wq)
return ERR_PTR(-ENOMEM);
diff --git a/io_uring/io_uring.c b/io_uring/io_uring.c
index ccab8562d273..3c64c458a281 100644
--- a/io_uring/io_uring.c
+++ b/io_uring/io_uring.c
@@ -195,8 +195,8 @@ static int io_alloc_hash_table(struct io_hash_table *table, unsigned bits)
do {
hash_buckets = 1U << bits;
- table->hbs = kvmalloc_array(hash_buckets, sizeof(table->hbs[0]),
- GFP_KERNEL_ACCOUNT);
+ table->hbs = kvmalloc_objs(table->hbs[0], hash_buckets,
+ GFP_KERNEL_ACCOUNT);
if (table->hbs)
break;
if (bits == 1)
@@ -226,7 +226,7 @@ static __cold struct io_ring_ctx *io_ring_ctx_alloc(struct io_uring_params *p)
int hash_bits;
bool ret;
- ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
+ ctx = kzalloc_obj(*ctx, GFP_KERNEL);
if (!ctx)
return NULL;
@@ -1330,7 +1330,7 @@ static __cold void io_drain_req(struct io_kiocb *req)
bool drain = req->flags & IOSQE_IO_DRAIN;
struct io_defer_entry *de;
- de = kmalloc(sizeof(*de), GFP_KERNEL_ACCOUNT);
+ de = kmalloc_obj(*de, GFP_KERNEL_ACCOUNT);
if (!de) {
io_req_defer_failed(req, -ENOMEM);
return;
diff --git a/io_uring/kbuf.c b/io_uring/kbuf.c
index dae5b4ab3819..aea1794dc5f6 100644
--- a/io_uring/kbuf.c
+++ b/io_uring/kbuf.c
@@ -265,7 +265,7 @@ static int io_ring_buffers_peek(struct io_kiocb *req, struct buf_sel_arg *arg,
* a speculative peek operation.
*/
if (arg->mode & KBUF_MODE_EXPAND && nr_avail > nr_iovs && arg->max_len) {
- iov = kmalloc_array(nr_avail, sizeof(struct iovec), GFP_KERNEL);
+ iov = kmalloc_objs(struct iovec, nr_avail, GFP_KERNEL);
if (unlikely(!iov))
return -ENOMEM;
if (arg->mode & KBUF_MODE_FREE)
@@ -532,7 +532,7 @@ static int io_add_buffers(struct io_ring_ctx *ctx, struct io_provide_buf *pbuf,
ret = -EOVERFLOW;
break;
}
- buf = kmalloc(sizeof(*buf), GFP_KERNEL_ACCOUNT);
+ buf = kmalloc_obj(*buf, GFP_KERNEL_ACCOUNT);
if (!buf)
break;
@@ -559,7 +559,7 @@ static int __io_manage_buffers_legacy(struct io_kiocb *req,
if (!bl) {
if (req->opcode != IORING_OP_PROVIDE_BUFFERS)
return -ENOENT;
- bl = kzalloc(sizeof(*bl), GFP_KERNEL_ACCOUNT);
+ bl = kzalloc_obj(*bl, GFP_KERNEL_ACCOUNT);
if (!bl)
return -ENOMEM;
@@ -628,7 +628,7 @@ int io_register_pbuf_ring(struct io_ring_ctx *ctx, void __user *arg)
io_destroy_bl(ctx, bl);
}
- bl = kzalloc(sizeof(*bl), GFP_KERNEL_ACCOUNT);
+ bl = kzalloc_obj(*bl, GFP_KERNEL_ACCOUNT);
if (!bl)
return -ENOMEM;
diff --git a/io_uring/memmap.c b/io_uring/memmap.c
index 89f56609e50a..e6958968975a 100644
--- a/io_uring/memmap.c
+++ b/io_uring/memmap.c
@@ -56,7 +56,7 @@ struct page **io_pin_pages(unsigned long uaddr, unsigned long len, int *npages)
if (WARN_ON_ONCE(nr_pages > INT_MAX))
return ERR_PTR(-EOVERFLOW);
- pages = kvmalloc_array(nr_pages, sizeof(struct page *), GFP_KERNEL_ACCOUNT);
+ pages = kvmalloc_objs(struct page *, nr_pages, GFP_KERNEL_ACCOUNT);
if (!pages)
return ERR_PTR(-ENOMEM);
@@ -158,7 +158,7 @@ static int io_region_allocate_pages(struct io_mapped_region *mr,
unsigned long nr_allocated;
struct page **pages;
- pages = kvmalloc_array(mr->nr_pages, sizeof(*pages), gfp);
+ pages = kvmalloc_objs(*pages, mr->nr_pages, gfp);
if (!pages)
return -ENOMEM;
diff --git a/io_uring/mock_file.c b/io_uring/mock_file.c
index 80c96ad2061f..221b60ad0723 100644
--- a/io_uring/mock_file.c
+++ b/io_uring/mock_file.c
@@ -115,7 +115,7 @@ static ssize_t io_mock_delay_rw(struct kiocb *iocb, size_t len)
struct io_mock_file *mf = iocb->ki_filp->private_data;
struct io_mock_iocb *mio;
- mio = kzalloc(sizeof(*mio), GFP_KERNEL);
+ mio = kzalloc_obj(*mio, GFP_KERNEL);
if (!mio)
return -ENOMEM;
@@ -242,7 +242,7 @@ static int io_create_mock_file(struct io_uring_cmd *cmd, unsigned int issue_flag
if (mc.rw_delay_ns > NSEC_PER_SEC)
return -EINVAL;
- mf = kzalloc(sizeof(*mf), GFP_KERNEL_ACCOUNT);
+ mf = kzalloc_obj(*mf, GFP_KERNEL_ACCOUNT);
if (!mf)
return -ENOMEM;
diff --git a/io_uring/poll.c b/io_uring/poll.c
index aac4b3b881fb..b671b84657d9 100644
--- a/io_uring/poll.c
+++ b/io_uring/poll.c
@@ -478,7 +478,7 @@ static void __io_queue_proc(struct io_poll *poll, struct io_poll_table *pt,
return;
}
- poll = kmalloc(sizeof(*poll), GFP_ATOMIC);
+ poll = kmalloc_obj(*poll, GFP_ATOMIC);
if (!poll) {
pt->error = -ENOMEM;
return;
@@ -655,7 +655,7 @@ static struct async_poll *io_req_alloc_apoll(struct io_kiocb *req,
if (!(issue_flags & IO_URING_F_UNLOCKED))
apoll = io_cache_alloc(&ctx->apoll_cache, GFP_ATOMIC);
else
- apoll = kmalloc(sizeof(*apoll), GFP_ATOMIC);
+ apoll = kmalloc_obj(*apoll, GFP_ATOMIC);
if (!apoll)
return NULL;
apoll->poll.retries = APOLL_MAX_RETRY;
diff --git a/io_uring/register.c b/io_uring/register.c
index 594b1f2ce875..6015a3e9ce69 100644
--- a/io_uring/register.c
+++ b/io_uring/register.c
@@ -218,7 +218,7 @@ static int io_register_restrictions_task(void __user *arg, unsigned int nr_args)
if (!mem_is_zero(tres.resv, sizeof(tres.resv)))
return -EINVAL;
- res = kzalloc(sizeof(*res), GFP_KERNEL_ACCOUNT);
+ res = kzalloc_obj(*res, GFP_KERNEL_ACCOUNT);
if (!res)
return -ENOMEM;
@@ -250,7 +250,7 @@ static int io_register_bpf_filter_task(void __user *arg, unsigned int nr_args)
/* If no task restrictions exist, setup a new set */
res = current->io_uring_restrict;
if (!res) {
- res = kzalloc(sizeof(*res), GFP_KERNEL_ACCOUNT);
+ res = kzalloc_obj(*res, GFP_KERNEL_ACCOUNT);
if (!res)
return -ENOMEM;
}
diff --git a/io_uring/rsrc.c b/io_uring/rsrc.c
index 842e231c8a7c..9b799e7ba889 100644
--- a/io_uring/rsrc.c
+++ b/io_uring/rsrc.c
@@ -113,8 +113,7 @@ static struct io_mapped_ubuf *io_alloc_imu(struct io_ring_ctx *ctx,
{
if (nr_bvecs <= IO_CACHED_BVECS_SEGS)
return io_cache_alloc(&ctx->imu_cache, GFP_KERNEL);
- return kvmalloc(struct_size_t(struct io_mapped_ubuf, bvec, nr_bvecs),
- GFP_KERNEL);
+ return kvmalloc_flex(struct io_mapped_ubuf, bvec, nr_bvecs, GFP_KERNEL);
}
static void io_free_imu(struct io_ring_ctx *ctx, struct io_mapped_ubuf *imu)
@@ -200,8 +199,8 @@ __cold void io_rsrc_data_free(struct io_ring_ctx *ctx,
__cold int io_rsrc_data_alloc(struct io_rsrc_data *data, unsigned nr)
{
- data->nodes = kvmalloc_array(nr, sizeof(struct io_rsrc_node *),
- GFP_KERNEL_ACCOUNT | __GFP_ZERO);
+ data->nodes = kvmalloc_objs(struct io_rsrc_node *, nr,
+ GFP_KERNEL_ACCOUNT | __GFP_ZERO);
if (data->nodes) {
data->nr = nr;
return 0;
@@ -684,7 +683,7 @@ static bool io_coalesce_buffer(struct page ***pages, int *nr_pages,
unsigned i, j;
/* Store head pages only*/
- new_array = kvmalloc_array(nr_folios, sizeof(struct page *), GFP_KERNEL);
+ new_array = kvmalloc_objs(struct page *, nr_folios, GFP_KERNEL);
if (!new_array)
return false;
@@ -1310,7 +1309,7 @@ int io_vec_realloc(struct iou_vec *iv, unsigned nr_entries)
gfp_t gfp = GFP_KERNEL_ACCOUNT | __GFP_NOWARN;
struct iovec *iov;
- iov = kmalloc_array(nr_entries, sizeof(iov[0]), gfp);
+ iov = kmalloc_objs(iov[0], nr_entries, gfp);
if (!iov)
return -ENOMEM;
diff --git a/io_uring/sqpoll.c b/io_uring/sqpoll.c
index becdfdd323a9..97e64d7d029f 100644
--- a/io_uring/sqpoll.c
+++ b/io_uring/sqpoll.c
@@ -153,7 +153,7 @@ static struct io_sq_data *io_get_sq_data(struct io_uring_params *p,
return sqd;
}
- sqd = kzalloc(sizeof(*sqd), GFP_KERNEL);
+ sqd = kzalloc_obj(*sqd, GFP_KERNEL);
if (!sqd)
return ERR_PTR(-ENOMEM);
diff --git a/io_uring/tctx.c b/io_uring/tctx.c
index 270263699c6f..fa97bc7db6a3 100644
--- a/io_uring/tctx.c
+++ b/io_uring/tctx.c
@@ -23,7 +23,7 @@ static struct io_wq *io_init_wq_offload(struct io_ring_ctx *ctx,
mutex_lock(&ctx->uring_lock);
hash = ctx->hash_map;
if (!hash) {
- hash = kzalloc(sizeof(*hash), GFP_KERNEL);
+ hash = kzalloc_obj(*hash, GFP_KERNEL);
if (!hash) {
mutex_unlock(&ctx->uring_lock);
return ERR_PTR(-ENOMEM);
@@ -80,7 +80,7 @@ __cold int io_uring_alloc_task_context(struct task_struct *task,
struct io_uring_task *tctx;
int ret;
- tctx = kzalloc(sizeof(*tctx), GFP_KERNEL);
+ tctx = kzalloc_obj(*tctx, GFP_KERNEL);
if (unlikely(!tctx))
return -ENOMEM;
@@ -139,7 +139,7 @@ int __io_uring_add_tctx_node(struct io_ring_ctx *ctx)
if (tctx->io_wq)
io_wq_set_exit_on_idle(tctx->io_wq, false);
if (!xa_load(&tctx->xa, (unsigned long)ctx)) {
- node = kmalloc(sizeof(*node), GFP_KERNEL);
+ node = kmalloc_obj(*node, GFP_KERNEL);
if (!node)
return -ENOMEM;
node->ctx = ctx;
@@ -378,7 +378,7 @@ int __io_uring_fork(struct task_struct *tsk)
/* Don't leave it dangling on error */
tsk->io_uring_restrict = NULL;
- res = kzalloc(sizeof(*res), GFP_KERNEL_ACCOUNT);
+ res = kzalloc_obj(*res, GFP_KERNEL_ACCOUNT);
if (!res)
return -ENOMEM;
diff --git a/io_uring/xattr.c b/io_uring/xattr.c
index ba2b98cf13f9..28475bf8ed47 100644
--- a/io_uring/xattr.c
+++ b/io_uring/xattr.c
@@ -56,7 +56,7 @@ static int __io_getxattr_prep(struct io_kiocb *req,
if (ix->ctx.flags)
return -EINVAL;
- ix->ctx.kname = kmalloc(sizeof(*ix->ctx.kname), GFP_KERNEL);
+ ix->ctx.kname = kmalloc_obj(*ix->ctx.kname, GFP_KERNEL);
if (!ix->ctx.kname)
return -ENOMEM;
@@ -133,7 +133,7 @@ static int __io_setxattr_prep(struct io_kiocb *req,
ix->ctx.size = READ_ONCE(sqe->len);
ix->ctx.flags = READ_ONCE(sqe->xattr_flags);
- ix->ctx.kname = kmalloc(sizeof(*ix->ctx.kname), GFP_KERNEL);
+ ix->ctx.kname = kmalloc_obj(*ix->ctx.kname, GFP_KERNEL);
if (!ix->ctx.kname)
return -ENOMEM;
diff --git a/io_uring/zcrx.c b/io_uring/zcrx.c
index 97984a73a95d..affb802fa2da 100644
--- a/io_uring/zcrx.c
+++ b/io_uring/zcrx.c
@@ -452,7 +452,7 @@ static int io_zcrx_create_area(struct io_zcrx_ifq *ifq,
}
ret = -ENOMEM;
- area = kzalloc(sizeof(*area), GFP_KERNEL);
+ area = kzalloc_obj(*area, GFP_KERNEL);
if (!area)
goto err;
area->ifq = ifq;
@@ -471,8 +471,8 @@ static int io_zcrx_create_area(struct io_zcrx_ifq *ifq,
area->nia.num_niovs = nr_iovs;
ret = -ENOMEM;
- area->nia.niovs = kvmalloc_array(nr_iovs, sizeof(area->nia.niovs[0]),
- GFP_KERNEL_ACCOUNT | __GFP_ZERO);
+ area->nia.niovs = kvmalloc_objs(area->nia.niovs[0], nr_iovs,
+ GFP_KERNEL_ACCOUNT | __GFP_ZERO);
if (!area->nia.niovs)
goto err;
@@ -481,7 +481,7 @@ static int io_zcrx_create_area(struct io_zcrx_ifq *ifq,
if (!area->freelist)
goto err;
- area->user_refs = kvmalloc_array(nr_iovs, sizeof(area->user_refs[0]),
+ area->user_refs = kvmalloc_objs(area->user_refs[0], nr_iovs,
GFP_KERNEL_ACCOUNT | __GFP_ZERO);
if (!area->user_refs)
goto err;
@@ -514,7 +514,7 @@ static struct io_zcrx_ifq *io_zcrx_ifq_alloc(struct io_ring_ctx *ctx)
{
struct io_zcrx_ifq *ifq;
- ifq = kzalloc(sizeof(*ifq), GFP_KERNEL);
+ ifq = kzalloc_obj(*ifq, GFP_KERNEL);
if (!ifq)
return NULL;