author    Jens Axboe <axboe@kernel.dk>    2026-02-05 08:44:31 -0700
committer Jens Axboe <axboe@kernel.dk>    2026-02-05 08:44:31 -0700
commit    7e210d5e934c8af0f74a1e1b4852a8996a6cb5a5 (patch)
tree      09fc8b4daf23a0bce36d3c71a29832d9477f277c
parent    4da7c5c3ec34d839bba6e035c3d05c447a2f9d4f (diff)
parent    52a0a98549344ca20ad81a4176d68d28e3c05a5c (diff)
Merge tag 'nvme-6.19-2026-02-05' of git://git.infradead.org/nvme into block-6.19
Pull NVMe fixes from Keith:

"- Fix NULL pointer access setting up dma mappings (Keith)
 - Fix invalid memory access from malformed TCP PDU (YunJe)"

* tag 'nvme-6.19-2026-02-05' of git://git.infradead.org/nvme:
  nvmet-tcp: add bounds checks in nvmet_tcp_build_pdu_iovec
  nvme-pci: handle changing device dma map requirements
 drivers/nvme/host/pci.c   | 45
 drivers/nvme/target/tcp.c | 17
 2 files changed, 47 insertions(+), 15 deletions(-)
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index c2bee32332fe..d86f2565a92c 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -816,6 +816,32 @@ static void nvme_unmap_data(struct request *req)
nvme_free_descriptors(req);
}
+static bool nvme_pci_prp_save_mapping(struct request *req,
+ struct device *dma_dev,
+ struct blk_dma_iter *iter)
+{
+ struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
+
+ if (dma_use_iova(&iod->dma_state) || !dma_need_unmap(dma_dev))
+ return true;
+
+ if (!iod->nr_dma_vecs) {
+ struct nvme_queue *nvmeq = req->mq_hctx->driver_data;
+
+ iod->dma_vecs = mempool_alloc(nvmeq->dev->dmavec_mempool,
+ GFP_ATOMIC);
+ if (!iod->dma_vecs) {
+ iter->status = BLK_STS_RESOURCE;
+ return false;
+ }
+ }
+
+ iod->dma_vecs[iod->nr_dma_vecs].addr = iter->addr;
+ iod->dma_vecs[iod->nr_dma_vecs].len = iter->len;
+ iod->nr_dma_vecs++;
+ return true;
+}
+
static bool nvme_pci_prp_iter_next(struct request *req, struct device *dma_dev,
struct blk_dma_iter *iter)
{
@@ -825,12 +851,7 @@ static bool nvme_pci_prp_iter_next(struct request *req, struct device *dma_dev,
return true;
if (!blk_rq_dma_map_iter_next(req, dma_dev, &iod->dma_state, iter))
return false;
- if (!dma_use_iova(&iod->dma_state) && dma_need_unmap(dma_dev)) {
- iod->dma_vecs[iod->nr_dma_vecs].addr = iter->addr;
- iod->dma_vecs[iod->nr_dma_vecs].len = iter->len;
- iod->nr_dma_vecs++;
- }
- return true;
+ return nvme_pci_prp_save_mapping(req, dma_dev, iter);
}
static blk_status_t nvme_pci_setup_data_prp(struct request *req,
@@ -843,15 +864,8 @@ static blk_status_t nvme_pci_setup_data_prp(struct request *req,
unsigned int prp_len, i;
__le64 *prp_list;
- if (!dma_use_iova(&iod->dma_state) && dma_need_unmap(nvmeq->dev->dev)) {
- iod->dma_vecs = mempool_alloc(nvmeq->dev->dmavec_mempool,
- GFP_ATOMIC);
- if (!iod->dma_vecs)
- return BLK_STS_RESOURCE;
- iod->dma_vecs[0].addr = iter->addr;
- iod->dma_vecs[0].len = iter->len;
- iod->nr_dma_vecs = 1;
- }
+ if (!nvme_pci_prp_save_mapping(req, nvmeq->dev->dev, iter))
+ return iter->status;
/*
* PRP1 always points to the start of the DMA transfers.
@@ -1219,6 +1233,7 @@ static blk_status_t nvme_prep_rq(struct request *req)
iod->nr_descriptors = 0;
iod->total_len = 0;
iod->meta_total_len = 0;
+ iod->nr_dma_vecs = 0;
ret = nvme_setup_cmd(req->q->queuedata, req);
if (ret)
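
The pci.c change above moves the dmavec_mempool allocation out of nvme_pci_setup_data_prp() and into a shared helper that allocates on the first mapping it actually records, so the first and every subsequent PRP iteration take the same path, and the counter is reset per request in nvme_prep_rq(). The sketch below is a minimal userspace analog of that lazy-allocation pattern, with plain malloc() standing in for the mempool; save_mapping, io_desc, and max_vecs are illustrative names, not kernel API.

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

struct dma_vec { unsigned long addr; unsigned int len; };

struct io_desc {
	struct dma_vec *vecs;	/* backing store, allocated on first use */
	unsigned int nr_vecs;	/* reset to 0 when a request is prepared */
};

/*
 * Record one mapping, allocating the vector array the first time a
 * mapping actually needs saving. Returns false on allocation failure,
 * which the caller turns into a resource error (BLK_STS_RESOURCE in
 * the patch above). The array is freed on the teardown path, which
 * this analog leaves to main().
 */
static bool save_mapping(struct io_desc *iod, unsigned long addr,
			 unsigned int len, unsigned int max_vecs)
{
	if (!iod->nr_vecs) {
		iod->vecs = malloc(max_vecs * sizeof(*iod->vecs));
		if (!iod->vecs)
			return false;
	}
	iod->vecs[iod->nr_vecs].addr = addr;
	iod->vecs[iod->nr_vecs].len = len;
	iod->nr_vecs++;
	return true;
}

int main(void)
{
	struct io_desc iod = { 0 };

	/* Only the first call allocates; the second just appends. */
	if (save_mapping(&iod, 0x1000, 512, 4) &&
	    save_mapping(&iod, 0x2000, 512, 4))
		printf("saved %u mappings\n", iod.nr_vecs);
	free(iod.vecs);
	return 0;
}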
diff --git a/drivers/nvme/target/tcp.c b/drivers/nvme/target/tcp.c
index 549a4786d1c3..bda816d66846 100644
--- a/drivers/nvme/target/tcp.c
+++ b/drivers/nvme/target/tcp.c
@@ -349,11 +349,14 @@ static void nvmet_tcp_free_cmd_buffers(struct nvmet_tcp_cmd *cmd)
cmd->req.sg = NULL;
}
+static void nvmet_tcp_fatal_error(struct nvmet_tcp_queue *queue);
+
static void nvmet_tcp_build_pdu_iovec(struct nvmet_tcp_cmd *cmd)
{
struct bio_vec *iov = cmd->iov;
struct scatterlist *sg;
u32 length, offset, sg_offset;
+ unsigned int sg_remaining;
int nr_pages;
length = cmd->pdu_len;
@@ -361,9 +364,22 @@ static void nvmet_tcp_build_pdu_iovec(struct nvmet_tcp_cmd *cmd)
offset = cmd->rbytes_done;
cmd->sg_idx = offset / PAGE_SIZE;
sg_offset = offset % PAGE_SIZE;
+ if (!cmd->req.sg_cnt || cmd->sg_idx >= cmd->req.sg_cnt) {
+ nvmet_tcp_fatal_error(cmd->queue);
+ return;
+ }
sg = &cmd->req.sg[cmd->sg_idx];
+ sg_remaining = cmd->req.sg_cnt - cmd->sg_idx;
while (length) {
+ if (!sg_remaining) {
+ nvmet_tcp_fatal_error(cmd->queue);
+ return;
+ }
+ if (!sg->length || sg->length <= sg_offset) {
+ nvmet_tcp_fatal_error(cmd->queue);
+ return;
+ }
u32 iov_len = min_t(u32, length, sg->length - sg_offset);
bvec_set_page(iov, sg_page(sg), iov_len,
@@ -371,6 +387,7 @@ static void nvmet_tcp_build_pdu_iovec(struct nvmet_tcp_cmd *cmd)
length -= iov_len;
sg = sg_next(sg);
+ sg_remaining--;
iov++;
sg_offset = 0;
}
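
The tcp.c change above validates a PDU-derived (offset, length) pair against the scatterlist both before and during the walk, since both values come off the wire and a malformed PDU could otherwise index past the end of the table. Below is a minimal userspace analog of that guarded walk, assuming an illustrative struct seg table in place of a real scatterlist and SEG_SIZE in place of PAGE_SIZE; it is a sketch of the pattern, not the driver code.

#include <stdbool.h>
#include <stdio.h>

#define SEG_SIZE 4096u	/* stands in for PAGE_SIZE */

struct seg { unsigned int length; };

/* Returns true iff (offset, length) fits entirely inside the segment table. */
static bool walk_segments(const struct seg *sg, unsigned int sg_cnt,
			  unsigned int offset, unsigned int length)
{
	unsigned int idx = offset / SEG_SIZE;
	unsigned int sg_off = offset % SEG_SIZE;

	if (!sg_cnt || idx >= sg_cnt)
		return false;		/* offset already past the table */

	while (length) {
		unsigned int iov_len;

		if (idx >= sg_cnt)
			return false;	/* length runs off the end */
		if (sg[idx].length <= sg_off)
			return false;	/* offset past this segment */

		iov_len = length < sg[idx].length - sg_off ?
			  length : sg[idx].length - sg_off;
		length -= iov_len;
		idx++;
		sg_off = 0;
	}
	return true;
}

int main(void)
{
	struct seg sg[2] = { { SEG_SIZE }, { SEG_SIZE } };

	/* An in-bounds request passes; an oversized PDU length is rejected. */
	printf("%d\n", walk_segments(sg, 2, SEG_SIZE, 100));	/* 1 */
	printf("%d\n", walk_segments(sg, 2, 0, 3 * SEG_SIZE));	/* 0 */
	return 0;
}

Rejecting the walk (the driver calls nvmet_tcp_fatal_error() and returns) rather than clamping the length is the safer choice here: a PDU whose claimed data length disagrees with the command's scatterlist is protocol-invalid, so tearing down the queue avoids acting on inconsistent state.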