[PATCH RFC PKS/PMEM 10/58] drivers/rdma: Utilize new kmap_thread()
ira.weiny at intel.com
Fri Oct 9 19:49:45 UTC 2020
From: Ira Weiny <ira.weiny at intel.com>
The kmap() calls in these drivers are localized to a single thread. To
avoid the overhead of global PKRS updates, use the new kmap_thread()
call.
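For reference, the conversion follows the pattern sketched below (a
minimal, hypothetical helper for illustration only; it is not part of
this patch, but it uses the kmap_thread()/kunmap_thread() pair this
series introduces):

	/*
	 * Hypothetical helper showing the kmap() -> kmap_thread()
	 * conversion: the mapping is created and torn down by a single
	 * thread, so a thread-local PKS update is sufficient and no
	 * global PKRS update is needed.
	 */
	static void copy_from_page(void *dst, struct page *page,
				   unsigned long offset, size_t len)
	{
		void *kvaddr = kmap_thread(page);

		memcpy(dst, kvaddr + offset, len);
		kunmap_thread(page);
	}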
Cc: Mike Marciniszyn <mike.marciniszyn at intel.com>
Cc: Dennis Dalessandro <dennis.dalessandro at intel.com>
Cc: Doug Ledford <dledford at redhat.com>
Cc: Jason Gunthorpe <jgg at ziepe.ca>
Cc: Faisal Latif <faisal.latif at intel.com>
Cc: Shiraz Saleem <shiraz.saleem at intel.com>
Cc: Bernard Metzler <bmt at zurich.ibm.com>
Signed-off-by: Ira Weiny <ira.weiny at intel.com>
---
drivers/infiniband/hw/hfi1/sdma.c | 4 ++--
drivers/infiniband/hw/i40iw/i40iw_cm.c | 10 +++++-----
drivers/infiniband/sw/siw/siw_qp_tx.c | 14 +++++++-------
3 files changed, 14 insertions(+), 14 deletions(-)
diff --git a/drivers/infiniband/hw/hfi1/sdma.c b/drivers/infiniband/hw/hfi1/sdma.c
index 04575c9afd61..09d206e3229a 100644
--- a/drivers/infiniband/hw/hfi1/sdma.c
+++ b/drivers/infiniband/hw/hfi1/sdma.c
@@ -3130,7 +3130,7 @@ int ext_coal_sdma_tx_descs(struct hfi1_devdata *dd, struct sdma_txreq *tx,
}
if (type == SDMA_MAP_PAGE) {
- kvaddr = kmap(page);
+ kvaddr = kmap_thread(page);
kvaddr += offset;
} else if (WARN_ON(!kvaddr)) {
__sdma_txclean(dd, tx);
@@ -3140,7 +3140,7 @@ int ext_coal_sdma_tx_descs(struct hfi1_devdata *dd, struct sdma_txreq *tx,
memcpy(tx->coalesce_buf + tx->coalesce_idx, kvaddr, len);
tx->coalesce_idx += len;
if (type == SDMA_MAP_PAGE)
- kunmap(page);
+ kunmap_thread(page);
/* If there is more data, return */
if (tx->tlen - tx->coalesce_idx)
diff --git a/drivers/infiniband/hw/i40iw/i40iw_cm.c b/drivers/infiniband/hw/i40iw/i40iw_cm.c
index a3b95805c154..122d7a5642a1 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_cm.c
+++ b/drivers/infiniband/hw/i40iw/i40iw_cm.c
@@ -3721,7 +3721,7 @@ int i40iw_accept(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
ibmr->device = iwpd->ibpd.device;
iwqp->lsmm_mr = ibmr;
if (iwqp->page)
- iwqp->sc_qp.qp_uk.sq_base = kmap(iwqp->page);
+ iwqp->sc_qp.qp_uk.sq_base = kmap_thread(iwqp->page);
dev->iw_priv_qp_ops->qp_send_lsmm(&iwqp->sc_qp,
iwqp->ietf_mem.va,
(accept.size + conn_param->private_data_len),
@@ -3729,12 +3729,12 @@ int i40iw_accept(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
} else {
if (iwqp->page)
- iwqp->sc_qp.qp_uk.sq_base = kmap(iwqp->page);
+ iwqp->sc_qp.qp_uk.sq_base = kmap_thread(iwqp->page);
dev->iw_priv_qp_ops->qp_send_lsmm(&iwqp->sc_qp, NULL, 0, 0);
}
if (iwqp->page)
- kunmap(iwqp->page);
+ kunmap_thread(iwqp->page);
iwqp->cm_id = cm_id;
cm_node->cm_id = cm_id;
@@ -4102,10 +4102,10 @@ static void i40iw_cm_event_connected(struct i40iw_cm_event *event)
i40iw_cm_init_tsa_conn(iwqp, cm_node);
read0 = (cm_node->send_rdma0_op == SEND_RDMA_READ_ZERO);
if (iwqp->page)
- iwqp->sc_qp.qp_uk.sq_base = kmap(iwqp->page);
+ iwqp->sc_qp.qp_uk.sq_base = kmap_thread(iwqp->page);
dev->iw_priv_qp_ops->qp_send_rtt(&iwqp->sc_qp, read0);
if (iwqp->page)
- kunmap(iwqp->page);
+ kunmap_thread(iwqp->page);
memset(&attr, 0, sizeof(attr));
attr.qp_state = IB_QPS_RTS;
diff --git a/drivers/infiniband/sw/siw/siw_qp_tx.c b/drivers/infiniband/sw/siw/siw_qp_tx.c
index d19d8325588b..4ed37c328d02 100644
--- a/drivers/infiniband/sw/siw/siw_qp_tx.c
+++ b/drivers/infiniband/sw/siw/siw_qp_tx.c
@@ -76,7 +76,7 @@ static int siw_try_1seg(struct siw_iwarp_tx *c_tx, void *paddr)
if (unlikely(!p))
return -EFAULT;
- buffer = kmap(p);
+ buffer = kmap_thread(p);
if (likely(PAGE_SIZE - off >= bytes)) {
memcpy(paddr, buffer + off, bytes);
@@ -84,7 +84,7 @@ static int siw_try_1seg(struct siw_iwarp_tx *c_tx, void *paddr)
unsigned long part = bytes - (PAGE_SIZE - off);
memcpy(paddr, buffer + off, part);
- kunmap(p);
+ kunmap_thread(p);
if (!mem->is_pbl)
p = siw_get_upage(mem->umem,
@@ -96,10 +96,10 @@ static int siw_try_1seg(struct siw_iwarp_tx *c_tx, void *paddr)
if (unlikely(!p))
return -EFAULT;
- buffer = kmap(p);
+ buffer = kmap_thread(p);
memcpy(paddr + part, buffer, bytes - part);
}
- kunmap(p);
+ kunmap_thread(p);
}
}
return (int)bytes;
@@ -505,7 +505,7 @@ static int siw_tx_hdt(struct siw_iwarp_tx *c_tx, struct socket *s)
page_array[seg] = p;
if (!c_tx->use_sendpage) {
- iov[seg].iov_base = kmap(p) + fp_off;
+ iov[seg].iov_base = kmap_thread(p) + fp_off;
iov[seg].iov_len = plen;
/* Remember for later kunmap() */
@@ -518,9 +518,9 @@ static int siw_tx_hdt(struct siw_iwarp_tx *c_tx, struct socket *s)
plen);
} else if (do_crc) {
crypto_shash_update(c_tx->mpa_crc_hd,
- kmap(p) + fp_off,
+ kmap_thread(p) + fp_off,
plen);
- kunmap(p);
+ kunmap_thread(p);
}
} else {
u64 va = sge->laddr + sge_off;
--
2.28.0.rc0.12.gb6a658bd00c9