| Field | Value | Date |
|---|---|---|
| author | Linus Torvalds <torvalds@linux-foundation.org> | 2026-03-27 13:30:04 -0700 |
| committer | Linus Torvalds <torvalds@linux-foundation.org> | 2026-03-27 13:30:04 -0700 |
| commit | 7df48e36313029e4c0907b2023905dd7213fd678 (patch) | |
| tree | 0e1abe0db9315df72f65259bb567769fb886c0e4 /drivers | |
| parent | 8af4fad545fa4df358c8e4d12f269e460717e514 (diff) | |
| parent | e37afcb56ae070477741fe2d6e61fc0c542cce2d (diff) | |
Merge tag 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/rdma/rdma
Pull rdma fixes from Jason Gunthorpe:
- Quite a few irdma bug fixes, several of them user-triggerable
- Fix a zeroed source MAC (SMAC) in ionic's UD headers
- Tolerate FW errors for RAAS in bng_re
- Don't UAF in efa when printing error events
- Better handle pool exhaustion in the new bvec paths (a sketch of the fallback pattern follows this list)
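The pool-exhaustion handling in rw.c (full diff below) follows a simple try-then-degrade shape: attempt the MR fast path, and retry with direct SGEs only when the failure is recoverable. The sketch below is a standalone userspace illustration of that shape, not the kernel code; try_mr_path() and post_direct_sge() are hypothetical stand-ins for rdma_rw_init_mr_wrs() and the direct-SGE helpers, and the pool-exhaustion trigger is simulated.

```c
#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

/* Pretend the MR pool is exhausted for large requests. */
static int try_mr_path(int sg_cnt)
{
	return sg_cnt > 8 ? -EAGAIN : 0;
}

static int post_direct_sge(int sg_cnt)
{
	printf("posted %d direct SGEs\n", sg_cnt);
	return 0;
}

/* Try the MR fast path; degrade to direct SGEs only on -EAGAIN. */
static int ctx_init(int sg_cnt, bool wants_mr, bool mr_mandatory)
{
	if (wants_mr) {
		int ret = try_mr_path(sg_cnt);

		/* Any result but pool exhaustion is final; so is iWARP/force_mr. */
		if (ret != -EAGAIN || mr_mandatory)
			return ret;
	}
	return post_direct_sge(sg_cnt);
}

int main(void)
{
	ctx_init(16, true, false);	/* recoverable: falls back, prints */
	return ctx_init(16, true, true) == -EAGAIN ? 0 : 1; /* terminal */
}
```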
* tag 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/rdma/rdma:
RDMA/irdma: Harden depth calculation functions
RDMA/irdma: Return EINVAL for invalid arp index error
RDMA/irdma: Fix deadlock during netdev reset with active connections
RDMA/irdma: Remove reset check from irdma_modify_qp_to_err()
RDMA/irdma: Clean up unnecessary dereference of event->cm_node
RDMA/irdma: Remove a NOP wait_event() in irdma_modify_qp_roce()
RDMA/irdma: Update ibqp state to error if QP is already in error state
RDMA/irdma: Initialize free_qp completion before using it
RDMA/efa: Fix possible deadlock
RDMA/rw: Fix MR pool exhaustion in bvec RDMA READ path
RDMA/rw: Fall back to direct SGE on MR pool exhaustion
RDMA/efa: Fix use of completion ctx after free
RDMA/bng_re: Fix silent failure in HWRM version query
RDMA/ionic: Preserve and set Ethernet source MAC after ib_ud_header_init()
RDMA/irdma: Fix double free related to rereg_user_mr
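For "RDMA/irdma: Harden depth calculation functions" above, the hazard is a user-controlled size shifted left in 32-bit arithmetic: the shift can wrap to a small value and slip past the maximum-depth check. A minimal standalone demonstration of why the uk.c helpers now widen to 64 bits before comparing (MAX_HW_QUANTA and the helper names here are assumptions for illustration, not the driver's code):

```c
#include <stdint.h>
#include <stdio.h>

#define MAX_HW_QUANTA 32768u	/* assumed limit for the demo */

static int get_depth32(uint32_t size, uint8_t shift, uint32_t *depth)
{
	uint32_t q = size << shift;	/* may wrap for large size */

	if (q > MAX_HW_QUANTA)
		return -1;
	*depth = q;
	return 0;
}

static int get_depth64(uint32_t size, uint8_t shift, uint32_t *depth)
{
	uint64_t q = (uint64_t)size << shift;	/* cannot wrap */

	if (q > MAX_HW_QUANTA)
		return -1;
	*depth = (uint32_t)q;
	return 0;
}

int main(void)
{
	uint32_t d;

	/* 0x20000001 << 3 wraps to 8 in 32 bits: a bogus depth is accepted. */
	printf("32-bit check: %s\n",
	       get_depth32(0x20000001u, 3, &d) ? "rejected" : "accepted");
	printf("64-bit check: %s\n",
	       get_depth64(0x20000001u, 3, &d) ? "rejected" : "accepted");
	return 0;
}
```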
Diffstat (limited to 'drivers')
| Mode | Path | Lines changed |
|---|---|---|
| -rw-r--r-- | drivers/infiniband/core/rw.c | 37 |
| -rw-r--r-- | drivers/infiniband/hw/bng_re/bng_dev.c | 14 |
| -rw-r--r-- | drivers/infiniband/hw/efa/efa_com.c | 88 |
| -rw-r--r-- | drivers/infiniband/hw/ionic/ionic_controlpath.c | 4 |
| -rw-r--r-- | drivers/infiniband/hw/irdma/cm.c | 29 |
| -rw-r--r-- | drivers/infiniband/hw/irdma/uk.c | 39 |
| -rw-r--r-- | drivers/infiniband/hw/irdma/utils.c | 2 |
| -rw-r--r-- | drivers/infiniband/hw/irdma/verbs.c | 10 |
8 files changed, 123 insertions, 100 deletions
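One pattern worth calling out before the diff: "RDMA/irdma: Initialize free_qp completion before using it" moves init_completion() ahead of the rf->qp_table store, because an object must be fully initialized before it is published where other contexts can find it. Below is a deliberately simplified, single-threaded C sketch of that publish-after-init rule; real kernel code would also need the barriers/locking that qp_table access implies, and struct qp and qp_table here are hypothetical stand-ins.

```c
#include <stdbool.h>

/* Toy completion standing in for struct completion + init_completion(). */
struct completion {
	bool done;
};

static void init_completion(struct completion *c)
{
	c->done = false;
}

struct qp {
	struct completion free_qp;
	/* ...the real irdma_qp carries much more state... */
};

static struct qp *qp_table[64];	/* stand-in for rf->qp_table */

static void publish_qp(struct qp *qp, int qp_num)
{
	/* Initialize every field a concurrent observer may touch first... */
	init_completion(&qp->free_qp);
	/* ...and only then make the object reachable through the table. */
	qp_table[qp_num] = qp;
}

int main(void)
{
	struct qp qp;

	publish_qp(&qp, 1);
	/* Whoever finds qp_table[1] now sees an initialized completion. */
	return qp_table[1]->free_qp.done ? 1 : 0;
}
```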
diff --git a/drivers/infiniband/core/rw.c b/drivers/infiniband/core/rw.c
index fc45c384833f..4fafe393a48c 100644
--- a/drivers/infiniband/core/rw.c
+++ b/drivers/infiniband/core/rw.c
@@ -608,14 +608,29 @@ int rdma_rw_ctx_init(struct rdma_rw_ctx *ctx, struct ib_qp *qp, u32 port_num,
 	if (rdma_rw_io_needs_mr(qp->device, port_num, dir, sg_cnt)) {
 		ret = rdma_rw_init_mr_wrs(ctx, qp, port_num, sg, sg_cnt,
 					  sg_offset, remote_addr, rkey, dir);
-	} else if (sg_cnt > 1) {
+		/*
+		 * If MR init succeeded or failed for a reason other
+		 * than pool exhaustion, that result is final.
+		 *
+		 * Pool exhaustion (-EAGAIN) from the max_sgl_rd
+		 * optimization is recoverable: fall back to
+		 * direct SGE posting. iWARP and force_mr require
+		 * MRs unconditionally, so -EAGAIN is terminal.
+		 */
+		if (ret != -EAGAIN ||
+		    rdma_protocol_iwarp(qp->device, port_num) ||
+		    unlikely(rdma_rw_force_mr))
+			goto out;
+	}
+
+	if (sg_cnt > 1)
 		ret = rdma_rw_init_map_wrs(ctx, qp, sg, sg_cnt,
 					   sg_offset, remote_addr, rkey, dir);
-	} else {
+	else
 		ret = rdma_rw_init_single_wr(ctx, qp, sg, sg_offset,
 					     remote_addr, rkey, dir);
-	}
 
+out:
 	if (ret < 0)
 		goto out_unmap_sg;
 	return ret;
@@ -686,14 +701,16 @@ int rdma_rw_ctx_init_bvec(struct rdma_rw_ctx *ctx, struct ib_qp *qp,
 		return ret;
 
 	/*
-	 * IOVA mapping not available. Check if MR registration provides
-	 * better performance than multiple SGE entries.
+	 * IOVA not available; fall back to the map_wrs path, which maps
+	 * each bvec as a direct SGE. This is always correct: the MR path
+	 * is a throughput optimization, not a correctness requirement.
+	 * (iWARP, which does require MRs, is handled by the check above.)
+	 *
+	 * The rdma_rw_io_needs_mr() gate is not used here because nr_bvec
+	 * is a raw page count that overstates DMA entry demand -- the bvec
+	 * caller has no post-DMA-coalescing segment count, and feeding the
+	 * inflated count into the MR path exhausts the pool on RDMA READs.
 	 */
-	if (rdma_rw_io_needs_mr(dev, port_num, dir, nr_bvec))
-		return rdma_rw_init_mr_wrs_bvec(ctx, qp, port_num, bvecs,
-						nr_bvec, &iter, remote_addr,
-						rkey, dir);
-
 	return rdma_rw_init_map_wrs_bvec(ctx, qp, bvecs, nr_bvec, &iter,
 					 remote_addr, rkey, dir);
 }
diff --git a/drivers/infiniband/hw/bng_re/bng_dev.c b/drivers/infiniband/hw/bng_re/bng_dev.c
index d34b5f88cd40..71a7ca2196ad 100644
--- a/drivers/infiniband/hw/bng_re/bng_dev.c
+++ b/drivers/infiniband/hw/bng_re/bng_dev.c
@@ -210,7 +210,7 @@ static int bng_re_stats_ctx_alloc(struct bng_re_dev *rdev)
 	return rc;
 }
 
-static void bng_re_query_hwrm_version(struct bng_re_dev *rdev)
+static int bng_re_query_hwrm_version(struct bng_re_dev *rdev)
 {
 	struct bnge_auxr_dev *aux_dev = rdev->aux_dev;
 	struct hwrm_ver_get_output ver_get_resp = {};
@@ -230,7 +230,7 @@ static void bng_re_query_hwrm_version(struct bng_re_dev *rdev)
 	if (rc) {
 		ibdev_err(&rdev->ibdev,
 			  "Failed to query HW version, rc = 0x%x", rc);
-		return;
+		return rc;
 	}
 
 	cctx = rdev->chip_ctx;
@@ -244,6 +244,8 @@ static void bng_re_query_hwrm_version(struct bng_re_dev *rdev)
 
 	if (!cctx->hwrm_cmd_max_timeout)
 		cctx->hwrm_cmd_max_timeout = BNG_ROCE_FW_MAX_TIMEOUT;
+
+	return 0;
 }
 
 static void bng_re_dev_uninit(struct bng_re_dev *rdev)
@@ -306,13 +308,15 @@ static int bng_re_dev_init(struct bng_re_dev *rdev)
 		goto msix_ctx_fail;
 	}
 
-	bng_re_query_hwrm_version(rdev);
+	rc = bng_re_query_hwrm_version(rdev);
+	if (rc)
+		goto destroy_chip_ctx;
 
 	rc = bng_re_alloc_fw_channel(&rdev->bng_res, &rdev->rcfw);
 	if (rc) {
 		ibdev_err(&rdev->ibdev,
 			  "Failed to allocate RCFW Channel: %#x\n", rc);
-		goto alloc_fw_chl_fail;
+		goto destroy_chip_ctx;
 	}
 
 	/* Allocate nq record memory */
@@ -391,7 +395,7 @@ free_rcfw:
 	kfree(rdev->nqr);
 nq_alloc_fail:
 	bng_re_free_rcfw_channel(&rdev->rcfw);
-alloc_fw_chl_fail:
+destroy_chip_ctx:
 	bng_re_destroy_chip_ctx(rdev);
 msix_ctx_fail:
 	bnge_unregister_dev(rdev->aux_dev);
diff --git a/drivers/infiniband/hw/efa/efa_com.c b/drivers/infiniband/hw/efa/efa_com.c
index 229b0ad3b0cb..e97b5f0d7003 100644
--- a/drivers/infiniband/hw/efa/efa_com.c
+++ b/drivers/infiniband/hw/efa/efa_com.c
@@ -1,6 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0 OR BSD-2-Clause
 /*
- * Copyright 2018-2025 Amazon.com, Inc. or its affiliates. All rights reserved.
+ * Copyright 2018-2026 Amazon.com, Inc. or its affiliates. All rights reserved.
  */
 
 #include <linux/log2.h>
@@ -310,23 +310,19 @@ static inline struct efa_comp_ctx *efa_com_get_comp_ctx_by_cmd_id(struct efa_com
 	return &aq->comp_ctx[ctx_id];
 }
 
-static struct efa_comp_ctx *__efa_com_submit_admin_cmd(struct efa_com_admin_queue *aq,
-						       struct efa_admin_aq_entry *cmd,
-						       size_t cmd_size_in_bytes,
-						       struct efa_admin_acq_entry *comp,
-						       size_t comp_size_in_bytes)
+static void __efa_com_submit_admin_cmd(struct efa_com_admin_queue *aq,
+				       struct efa_comp_ctx *comp_ctx,
+				       struct efa_admin_aq_entry *cmd,
+				       size_t cmd_size_in_bytes,
+				       struct efa_admin_acq_entry *comp,
+				       size_t comp_size_in_bytes)
 {
 	struct efa_admin_aq_entry *aqe;
-	struct efa_comp_ctx *comp_ctx;
 	u16 queue_size_mask;
 	u16 cmd_id;
 	u16 ctx_id;
 	u16 pi;
 
-	comp_ctx = efa_com_alloc_comp_ctx(aq);
-	if (!comp_ctx)
-		return ERR_PTR(-EINVAL);
-
 	queue_size_mask = aq->depth - 1;
 	pi = aq->sq.pc & queue_size_mask;
 	ctx_id = efa_com_get_comp_ctx_id(aq, comp_ctx);
@@ -360,8 +356,6 @@ static struct efa_comp_ctx *__efa_com_submit_admin_cmd(struct efa_com_admin_queu
 
 	/* barrier not needed in case of writel */
 	writel(aq->sq.pc, aq->sq.db_addr);
-
-	return comp_ctx;
 }
 
 static inline int efa_com_init_comp_ctxt(struct efa_com_admin_queue *aq)
@@ -394,28 +388,25 @@ static inline int efa_com_init_comp_ctxt(struct efa_com_admin_queue *aq)
 	return 0;
 }
 
-static struct efa_comp_ctx *efa_com_submit_admin_cmd(struct efa_com_admin_queue *aq,
-						     struct efa_admin_aq_entry *cmd,
-						     size_t cmd_size_in_bytes,
-						     struct efa_admin_acq_entry *comp,
-						     size_t comp_size_in_bytes)
+static int efa_com_submit_admin_cmd(struct efa_com_admin_queue *aq,
+				    struct efa_comp_ctx *comp_ctx,
+				    struct efa_admin_aq_entry *cmd,
+				    size_t cmd_size_in_bytes,
+				    struct efa_admin_acq_entry *comp,
+				    size_t comp_size_in_bytes)
 {
-	struct efa_comp_ctx *comp_ctx;
-
 	spin_lock(&aq->sq.lock);
 	if (!test_bit(EFA_AQ_STATE_RUNNING_BIT, &aq->state)) {
 		ibdev_err_ratelimited(aq->efa_dev, "Admin queue is closed\n");
 		spin_unlock(&aq->sq.lock);
-		return ERR_PTR(-ENODEV);
+		return -ENODEV;
 	}
 
-	comp_ctx = __efa_com_submit_admin_cmd(aq, cmd, cmd_size_in_bytes, comp,
-					      comp_size_in_bytes);
+	__efa_com_submit_admin_cmd(aq, comp_ctx, cmd, cmd_size_in_bytes, comp,
+				   comp_size_in_bytes);
 	spin_unlock(&aq->sq.lock);
-	if (IS_ERR(comp_ctx))
-		clear_bit(EFA_AQ_STATE_RUNNING_BIT, &aq->state);
 
-	return comp_ctx;
+	return 0;
 }
 
 static int efa_com_handle_single_admin_completion(struct efa_com_admin_queue *aq,
@@ -512,7 +503,6 @@ static int efa_com_wait_and_process_admin_cq_polling(struct efa_comp_c
 {
 	unsigned long timeout;
 	unsigned long flags;
-	int err;
 
 	timeout = jiffies + usecs_to_jiffies(aq->completion_timeout);
 
@@ -532,24 +522,20 @@ static int efa_com_wait_and_process_admin_cq_polling(struct efa_comp_c
 
 			atomic64_inc(&aq->stats.no_completion);
 			clear_bit(EFA_AQ_STATE_RUNNING_BIT, &aq->state);
-			err = -ETIME;
-			goto out;
+			return -ETIME;
 		}
 
 		msleep(aq->poll_interval);
 	}
 
-	err = efa_com_comp_status_to_errno(comp_ctx->user_cqe->acq_common_descriptor.status);
-out:
-	efa_com_dealloc_comp_ctx(aq, comp_ctx);
-	return err;
+	return efa_com_comp_status_to_errno(
+		comp_ctx->user_cqe->acq_common_descriptor.status);
 }
 
 static int efa_com_wait_and_process_admin_cq_interrupts(struct efa_comp_ctx *comp_ctx,
 							struct efa_com_admin_queue *aq)
 {
 	unsigned long flags;
-	int err;
 
 	wait_for_completion_timeout(&comp_ctx->wait_event,
 				    usecs_to_jiffies(aq->completion_timeout));
@@ -585,14 +571,11 @@ static int efa_com_wait_and_process_admin_cq_interrupts(struct efa_comp_ctx *com
 			aq->cq.cc);
 		clear_bit(EFA_AQ_STATE_RUNNING_BIT,
 			  &aq->state);
-		err = -ETIME;
-		goto out;
+		return -ETIME;
 	}
 
-	err = efa_com_comp_status_to_errno(comp_ctx->user_cqe->acq_common_descriptor.status);
-out:
-	efa_com_dealloc_comp_ctx(aq, comp_ctx);
-	return err;
+	return efa_com_comp_status_to_errno(
+		comp_ctx->user_cqe->acq_common_descriptor.status);
 }
 
 /*
@@ -642,30 +625,39 @@ int efa_com_cmd_exec(struct efa_com_admin_queue *aq,
 	ibdev_dbg(aq->efa_dev, "%s (opcode %d)\n",
 		  efa_com_cmd_str(cmd->aq_common_descriptor.opcode),
 		  cmd->aq_common_descriptor.opcode);
-	comp_ctx = efa_com_submit_admin_cmd(aq, cmd, cmd_size, comp, comp_size);
-	if (IS_ERR(comp_ctx)) {
+
+	comp_ctx = efa_com_alloc_comp_ctx(aq);
+	if (!comp_ctx) {
+		clear_bit(EFA_AQ_STATE_RUNNING_BIT, &aq->state);
+		up(&aq->avail_cmds);
+		return -EINVAL;
+	}
+
+	err = efa_com_submit_admin_cmd(aq, comp_ctx, cmd, cmd_size, comp, comp_size);
+	if (err) {
 		ibdev_err_ratelimited(
 			aq->efa_dev,
-			"Failed to submit command %s (opcode %u) err %pe\n",
+			"Failed to submit command %s (opcode %u) err %d\n",
 			efa_com_cmd_str(cmd->aq_common_descriptor.opcode),
-			cmd->aq_common_descriptor.opcode, comp_ctx);
+			cmd->aq_common_descriptor.opcode, err);
 
+		efa_com_dealloc_comp_ctx(aq, comp_ctx);
 		up(&aq->avail_cmds);
 		atomic64_inc(&aq->stats.cmd_err);
-		return PTR_ERR(comp_ctx);
+		return err;
 	}
 
 	err = efa_com_wait_and_process_admin_cq(comp_ctx, aq);
 	if (err) {
 		ibdev_err_ratelimited(
 			aq->efa_dev,
-			"Failed to process command %s (opcode %u) comp_status %d err %d\n",
+			"Failed to process command %s (opcode %u) err %d\n",
 			efa_com_cmd_str(cmd->aq_common_descriptor.opcode),
-			cmd->aq_common_descriptor.opcode,
-			comp_ctx->user_cqe->acq_common_descriptor.status, err);
+			cmd->aq_common_descriptor.opcode, err);
 
 		atomic64_inc(&aq->stats.cmd_err);
 	}
 
+	efa_com_dealloc_comp_ctx(aq, comp_ctx);
 	up(&aq->avail_cmds);
 
 	return err;
diff --git a/drivers/infiniband/hw/ionic/ionic_controlpath.c b/drivers/infiniband/hw/ionic/ionic_controlpath.c
index 4842931f5316..a5671da3db64 100644
--- a/drivers/infiniband/hw/ionic/ionic_controlpath.c
+++ b/drivers/infiniband/hw/ionic/ionic_controlpath.c
@@ -508,6 +508,7 @@ static int ionic_build_hdr(struct ionic_ibdev *dev,
 {
 	const struct ib_global_route *grh;
 	enum rdma_network_type net;
+	u8 smac[ETH_ALEN];
 	u16 vlan;
 	int rc;
 
@@ -518,7 +519,7 @@ static int ionic_build_hdr(struct ionic_ibdev *dev,
 
 	grh = rdma_ah_read_grh(attr);
 
-	rc = rdma_read_gid_l2_fields(grh->sgid_attr, &vlan, &hdr->eth.smac_h[0]);
+	rc = rdma_read_gid_l2_fields(grh->sgid_attr, &vlan, smac);
 	if (rc)
 		return rc;
 
@@ -536,6 +537,7 @@ static int ionic_build_hdr(struct ionic_ibdev *dev,
 	if (rc)
 		return rc;
 
+	ether_addr_copy(hdr->eth.smac_h, smac);
 	ether_addr_copy(hdr->eth.dmac_h, attr->roce.dmac);
 
 	if (net == RDMA_NETWORK_IPV4) {
diff --git a/drivers/infiniband/hw/irdma/cm.c b/drivers/infiniband/hw/irdma/cm.c
index 3d084d4ff577..91c0e7298283 100644
--- a/drivers/infiniband/hw/irdma/cm.c
+++ b/drivers/infiniband/hw/irdma/cm.c
@@ -2241,11 +2241,12 @@ irdma_make_cm_node(struct irdma_cm_core *cm_core, struct irdma_device *iwdev,
 	int oldarpindex;
 	int arpindex;
 	struct net_device *netdev = iwdev->netdev;
+	int ret;
 
 	/* create an hte and cm_node for this instance */
 	cm_node = kzalloc_obj(*cm_node, GFP_ATOMIC);
 	if (!cm_node)
-		return NULL;
+		return ERR_PTR(-ENOMEM);
 
 	/* set our node specific transport info */
 	cm_node->ipv4 = cm_info->ipv4;
@@ -2348,8 +2349,10 @@ irdma_make_cm_node(struct irdma_cm_core *cm_core, struct irdma_device *iwdev,
 			arpindex = -EINVAL;
 	}
 
-	if (arpindex < 0)
+	if (arpindex < 0) {
+		ret = -EINVAL;
 		goto err;
+	}
 
 	ether_addr_copy(cm_node->rem_mac,
 			iwdev->rf->arp_table[arpindex].mac_addr);
@@ -2360,7 +2363,7 @@ irdma_make_cm_node(struct irdma_cm_core *cm_core, struct irdma_device *iwdev,
 
 err:
 	kfree(cm_node);
 
-	return NULL;
+	return ERR_PTR(ret);
 }
 
 static void irdma_destroy_connection(struct irdma_cm_node *cm_node)
@@ -3021,8 +3024,8 @@ static int irdma_create_cm_node(struct irdma_cm_core *cm_core,
 
 	/* create a CM connection node */
 	cm_node = irdma_make_cm_node(cm_core, iwdev, cm_info, NULL);
-	if (!cm_node)
-		return -ENOMEM;
+	if (IS_ERR(cm_node))
+		return PTR_ERR(cm_node);
 
 	/* set our node side to client (active) side */
 	cm_node->tcp_cntxt.client = 1;
@@ -3219,9 +3222,9 @@ void irdma_receive_ilq(struct irdma_sc_vsi *vsi, struct irdma_puda_buf *rbuf)
 		cm_info.cm_id = listener->cm_id;
 		cm_node = irdma_make_cm_node(cm_core, iwdev, &cm_info,
 					     listener);
-		if (!cm_node) {
+		if (IS_ERR(cm_node)) {
 			ibdev_dbg(&cm_core->iwdev->ibdev,
-				  "CM: allocate node failed\n");
+				  "CM: allocate node failed ret=%ld\n", PTR_ERR(cm_node));
 			refcount_dec(&listener->refcnt);
 			return;
 		}
@@ -4239,21 +4242,21 @@ static void irdma_cm_event_handler(struct work_struct *work)
 		irdma_cm_event_reset(event);
 		break;
 	case IRDMA_CM_EVENT_CONNECTED:
-		if (!event->cm_node->cm_id ||
-		    event->cm_node->state != IRDMA_CM_STATE_OFFLOADED)
+		if (!cm_node->cm_id ||
+		    cm_node->state != IRDMA_CM_STATE_OFFLOADED)
 			break;
 		irdma_cm_event_connected(event);
 		break;
 	case IRDMA_CM_EVENT_MPA_REJECT:
-		if (!event->cm_node->cm_id ||
+		if (!cm_node->cm_id ||
 		    cm_node->state == IRDMA_CM_STATE_OFFLOADED)
 			break;
 		irdma_send_cm_event(cm_node, cm_node->cm_id,
 				    IW_CM_EVENT_CONNECT_REPLY, -ECONNREFUSED);
 		break;
 	case IRDMA_CM_EVENT_ABORTED:
-		if (!event->cm_node->cm_id ||
-		    event->cm_node->state == IRDMA_CM_STATE_OFFLOADED)
+		if (!cm_node->cm_id ||
+		    cm_node->state == IRDMA_CM_STATE_OFFLOADED)
			break;
 		irdma_event_connect_error(event);
 		break;
@@ -4263,7 +4266,7 @@ static void irdma_cm_event_handler(struct work_struct *work)
 		break;
 	}
 
-	irdma_rem_ref_cm_node(event->cm_node);
+	irdma_rem_ref_cm_node(cm_node);
 	kfree(event);
 }
diff --git a/drivers/infiniband/hw/irdma/uk.c b/drivers/infiniband/hw/irdma/uk.c
index ac3721a5747a..4718acf6c6fd 100644
--- a/drivers/infiniband/hw/irdma/uk.c
+++ b/drivers/infiniband/hw/irdma/uk.c
@@ -1438,7 +1438,7 @@ exit:
  * irdma_round_up_wq - return round up qp wq depth
  * @wqdepth: wq depth in quanta to round up
  */
-static int irdma_round_up_wq(u32 wqdepth)
+static u64 irdma_round_up_wq(u64 wqdepth)
 {
 	int scount = 1;
 
@@ -1491,15 +1491,16 @@ void irdma_get_wqe_shift(struct irdma_uk_attrs *uk_attrs, u32 sge,
 int irdma_get_sqdepth(struct irdma_uk_attrs *uk_attrs, u32 sq_size, u8 shift,
 		      u32 *sqdepth)
 {
-	u32 min_size = (u32)uk_attrs->min_hw_wq_size << shift;
+	u32 min_hw_quanta = (u32)uk_attrs->min_hw_wq_size << shift;
+	u64 hw_quanta =
+		irdma_round_up_wq(((u64)sq_size << shift) + IRDMA_SQ_RSVD);
 
-	*sqdepth = irdma_round_up_wq((sq_size << shift) + IRDMA_SQ_RSVD);
-
-	if (*sqdepth < min_size)
-		*sqdepth = min_size;
-	else if (*sqdepth > uk_attrs->max_hw_wq_quanta)
+	if (hw_quanta < min_hw_quanta)
+		hw_quanta = min_hw_quanta;
+	else if (hw_quanta > uk_attrs->max_hw_wq_quanta)
 		return -EINVAL;
 
+	*sqdepth = hw_quanta;
 	return 0;
 }
 
@@ -1513,15 +1514,16 @@ int irdma_get_sqdepth(struct irdma_uk_attrs *uk_attrs, u32 sq_size, u8 shift,
 int irdma_get_rqdepth(struct irdma_uk_attrs *uk_attrs, u32 rq_size, u8 shift,
 		      u32 *rqdepth)
 {
-	u32 min_size = (u32)uk_attrs->min_hw_wq_size << shift;
-
-	*rqdepth = irdma_round_up_wq((rq_size << shift) + IRDMA_RQ_RSVD);
+	u32 min_hw_quanta = (u32)uk_attrs->min_hw_wq_size << shift;
+	u64 hw_quanta =
+		irdma_round_up_wq(((u64)rq_size << shift) + IRDMA_RQ_RSVD);
 
-	if (*rqdepth < min_size)
-		*rqdepth = min_size;
-	else if (*rqdepth > uk_attrs->max_hw_rq_quanta)
+	if (hw_quanta < min_hw_quanta)
+		hw_quanta = min_hw_quanta;
+	else if (hw_quanta > uk_attrs->max_hw_rq_quanta)
 		return -EINVAL;
 
+	*rqdepth = hw_quanta;
 	return 0;
 }
 
@@ -1535,13 +1537,16 @@ int irdma_get_rqdepth(struct irdma_uk_attrs *uk_attrs, u32 rq_size, u8 shift,
 int irdma_get_srqdepth(struct irdma_uk_attrs *uk_attrs, u32 srq_size, u8 shift,
 		       u32 *srqdepth)
 {
-	*srqdepth = irdma_round_up_wq((srq_size << shift) + IRDMA_RQ_RSVD);
+	u32 min_hw_quanta = (u32)uk_attrs->min_hw_wq_size << shift;
+	u64 hw_quanta =
+		irdma_round_up_wq(((u64)srq_size << shift) + IRDMA_RQ_RSVD);
 
-	if (*srqdepth < ((u32)uk_attrs->min_hw_wq_size << shift))
-		*srqdepth = uk_attrs->min_hw_wq_size << shift;
-	else if (*srqdepth > uk_attrs->max_hw_srq_quanta)
+	if (hw_quanta < min_hw_quanta)
+		hw_quanta = min_hw_quanta;
+	else if (hw_quanta > uk_attrs->max_hw_srq_quanta)
 		return -EINVAL;
 
+	*srqdepth = hw_quanta;
 	return 0;
 }
diff --git a/drivers/infiniband/hw/irdma/utils.c b/drivers/infiniband/hw/irdma/utils.c
index ab8c5284d4be..495e5daff4b4 100644
--- a/drivers/infiniband/hw/irdma/utils.c
+++ b/drivers/infiniband/hw/irdma/utils.c
@@ -2322,8 +2322,6 @@ void irdma_modify_qp_to_err(struct irdma_sc_qp *sc_qp)
 	struct irdma_qp *qp = sc_qp->qp_uk.back_qp;
 	struct ib_qp_attr attr;
 
-	if (qp->iwdev->rf->reset)
-		return;
 	attr.qp_state = IB_QPS_ERR;
 
 	if (rdma_protocol_roce(qp->ibqp.device, 1))
diff --git a/drivers/infiniband/hw/irdma/verbs.c b/drivers/infiniband/hw/irdma/verbs.c
index 7251cd7a2147..95f590c10c05 100644
--- a/drivers/infiniband/hw/irdma/verbs.c
+++ b/drivers/infiniband/hw/irdma/verbs.c
@@ -558,7 +558,8 @@ static int irdma_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata)
 	}
 
 	irdma_qp_rem_ref(&iwqp->ibqp);
-	wait_for_completion(&iwqp->free_qp);
+	if (!iwdev->rf->reset)
+		wait_for_completion(&iwqp->free_qp);
 	irdma_free_lsmm_rsrc(iwqp);
 	irdma_cqp_qp_destroy_cmd(&iwdev->rf->sc_dev, &iwqp->sc_qp);
 
@@ -1105,6 +1106,7 @@ static int irdma_create_qp(struct ib_qp *ibqp,
 	spin_lock_init(&iwqp->sc_qp.pfpdu.lock);
 	iwqp->sig_all = init_attr->sq_sig_type == IB_SIGNAL_ALL_WR;
 	rf->qp_table[qp_num] = iwqp;
+	init_completion(&iwqp->free_qp);
 
 	if (udata) {
 		/* GEN_1 legacy support with libi40iw does not have expanded uresp struct */
@@ -1129,7 +1131,6 @@ static int irdma_create_qp(struct ib_qp *ibqp,
 		}
 	}
 
-	init_completion(&iwqp->free_qp);
 	return 0;
 
 error:
@@ -1462,8 +1463,6 @@ int irdma_modify_qp_roce(struct ib_qp *ibqp, struct ib_qp_attr *attr,
 		ctx_info->remote_atomics_en = true;
 	}
 
-	wait_event(iwqp->mod_qp_waitq, !atomic_read(&iwqp->hw_mod_qp_pend));
-
 	ibdev_dbg(&iwdev->ibdev,
 		  "VERBS: caller: %pS qp_id=%d to_ibqpstate=%d ibqpstate=%d irdma_qpstate=%d attr_mask=0x%x\n",
 		  __builtin_return_address(0), ibqp->qp_num, attr->qp_state,
@@ -1540,6 +1539,7 @@ int irdma_modify_qp_roce(struct ib_qp *ibqp, struct ib_qp_attr *attr,
 		case IB_QPS_ERR:
 		case IB_QPS_RESET:
 			if (iwqp->iwarp_state == IRDMA_QP_STATE_ERROR) {
+				iwqp->ibqp_state = attr->qp_state;
 				spin_unlock_irqrestore(&iwqp->lock, flags);
 				if (udata && udata->inlen) {
 					if (ib_copy_from_udata(&ureq, udata,
@@ -1745,6 +1745,7 @@ int irdma_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask,
 		case IB_QPS_ERR:
 		case IB_QPS_RESET:
 			if (iwqp->iwarp_state == IRDMA_QP_STATE_ERROR) {
+				iwqp->ibqp_state = attr->qp_state;
 				spin_unlock_irqrestore(&iwqp->lock, flags);
 				if (udata && udata->inlen) {
 					if (ib_copy_from_udata(&ureq, udata,
@@ -3723,6 +3724,7 @@ static int irdma_rereg_mr_trans(struct irdma_mr *iwmr, u64 start, u64 len,
 
 err:
 	ib_umem_release(region);
+	iwmr->region = NULL;
 	return err;
 }
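The ionic change above is a save/restore dance: ib_ud_header_init() zeroes the whole header, so a source MAC written into it earlier is lost, which is how the all-zero SMAC escaped onto the wire. Below is a userspace sketch of the same ordering fix; header_init() and read_l2_fields() are made-up stand-ins for ib_ud_header_init() and rdma_read_gid_l2_fields(), and the MAC value is arbitrary.

```c
#include <stdio.h>
#include <string.h>

#define ETH_ALEN 6

struct ud_header {
	unsigned char smac[ETH_ALEN];
	/* ...the real header carries many more fields... */
};

/* Stand-in for ib_ud_header_init(): zero-fills the whole header. */
static void header_init(struct ud_header *hdr)
{
	memset(hdr, 0, sizeof(*hdr));
}

/* Stand-in for rdma_read_gid_l2_fields(): yields the source MAC. */
static void read_l2_fields(unsigned char *smac)
{
	memcpy(smac, "\x02\x00\x00\x00\x00\x01", ETH_ALEN);
}

int main(void)
{
	struct ud_header hdr;
	unsigned char smac[ETH_ALEN];

	read_l2_fields(smac);             /* into a local, not into hdr */
	header_init(&hdr);                /* would have wiped hdr.smac */
	memcpy(hdr.smac, smac, ETH_ALEN); /* copy in only after init */

	printf("smac first/last byte: %02x/%02x\n",
	       hdr.smac[0], hdr.smac[ETH_ALEN - 1]);
	return hdr.smac[0] == 0x02 ? 0 : 1;
}
```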
