Skip to content

Commit 8d7c7c0

Browse files
jgunthorpe authored and leon committed
RDMA: Add ib_virt_dma_to_page()
Make it clearer what is going on by adding a function to go back from the "virtual" dma_addr to a kva and another to a struct page. This is used in the ib_uses_virt_dma() style drivers (siw, rxe, hfi, qib). Call them instead of a naked casting and virt_to_page() when working with dma_addr values encoded by the various ib_map functions. This also fixes the virt_to_page() casting problem Linus Walleij has been chasing. Cc: Linus Walleij <linus.walleij@linaro.org> Signed-off-by: Jason Gunthorpe <jgg@nvidia.com> Link: https://lore.kernel.org/r/0-v2-05ea785520ed+10-ib_virt_page_jgg@nvidia.com Signed-off-by: Leon Romanovsky <leon@kernel.org>
1 parent b2b1ddc commit 8d7c7c0

6 files changed

Lines changed: 45 additions & 27 deletions

File tree

drivers/infiniband/sw/rxe/rxe_mr.c

Lines changed: 8 additions & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -210,10 +210,10 @@ int rxe_mr_init_fast(int max_pages, struct rxe_mr *mr)
210210
return err;
211211
}
212212

213-
static int rxe_set_page(struct ib_mr *ibmr, u64 iova)
213+
static int rxe_set_page(struct ib_mr *ibmr, u64 dma_addr)
214214
{
215215
struct rxe_mr *mr = to_rmr(ibmr);
216-
struct page *page = virt_to_page(iova & mr->page_mask);
216+
struct page *page = ib_virt_dma_to_page(dma_addr);
217217
bool persistent = !!(mr->access & IB_ACCESS_FLUSH_PERSISTENT);
218218
int err;
219219

@@ -279,16 +279,16 @@ static int rxe_mr_copy_xarray(struct rxe_mr *mr, u64 iova, void *addr,
279279
return 0;
280280
}
281281

282-
static void rxe_mr_copy_dma(struct rxe_mr *mr, u64 iova, void *addr,
282+
static void rxe_mr_copy_dma(struct rxe_mr *mr, u64 dma_addr, void *addr,
283283
unsigned int length, enum rxe_mr_copy_dir dir)
284284
{
285-
unsigned int page_offset = iova & (PAGE_SIZE - 1);
285+
unsigned int page_offset = dma_addr & (PAGE_SIZE - 1);
286286
unsigned int bytes;
287287
struct page *page;
288288
u8 *va;
289289

290290
while (length) {
291-
page = virt_to_page(iova & mr->page_mask);
291+
page = ib_virt_dma_to_page(dma_addr);
292292
bytes = min_t(unsigned int, length,
293293
PAGE_SIZE - page_offset);
294294
va = kmap_local_page(page);
@@ -300,7 +300,7 @@ static void rxe_mr_copy_dma(struct rxe_mr *mr, u64 iova, void *addr,
300300

301301
kunmap_local(va);
302302
page_offset = 0;
303-
iova += bytes;
303+
dma_addr += bytes;
304304
addr += bytes;
305305
length -= bytes;
306306
}
@@ -488,7 +488,7 @@ int rxe_mr_do_atomic_op(struct rxe_mr *mr, u64 iova, int opcode,
488488

489489
if (mr->ibmr.type == IB_MR_TYPE_DMA) {
490490
page_offset = iova & (PAGE_SIZE - 1);
491-
page = virt_to_page(iova & PAGE_MASK);
491+
page = ib_virt_dma_to_page(iova);
492492
} else {
493493
unsigned long index;
494494
int err;
@@ -545,7 +545,7 @@ int rxe_mr_do_atomic_write(struct rxe_mr *mr, u64 iova, u64 value)
545545

546546
if (mr->ibmr.type == IB_MR_TYPE_DMA) {
547547
page_offset = iova & (PAGE_SIZE - 1);
548-
page = virt_to_page(iova & PAGE_MASK);
548+
page = ib_virt_dma_to_page(iova);
549549
} else {
550550
unsigned long index;
551551
int err;

drivers/infiniband/sw/rxe/rxe_verbs.c

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -760,7 +760,7 @@ static void copy_inline_data_to_wqe(struct rxe_send_wqe *wqe,
760760
int i;
761761

762762
for (i = 0; i < ibwr->num_sge; i++, sge++) {
763-
memcpy(p, (void *)(uintptr_t)sge->addr, sge->length);
763+
memcpy(p, ib_virt_dma_to_page(sge->addr), sge->length);
764764
p += sge->length;
765765
}
766766
}

drivers/infiniband/sw/siw/siw_qp_rx.c

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -139,7 +139,7 @@ static int siw_rx_pbl(struct siw_rx_stream *srx, int *pbl_idx,
139139
break;
140140

141141
bytes = min(bytes, len);
142-
if (siw_rx_kva(srx, (void *)(uintptr_t)buf_addr, bytes) ==
142+
if (siw_rx_kva(srx, ib_virt_dma_to_ptr(buf_addr), bytes) ==
143143
bytes) {
144144
copied += bytes;
145145
offset += bytes;
@@ -487,7 +487,7 @@ int siw_proc_send(struct siw_qp *qp)
487487
mem_p = *mem;
488488
if (mem_p->mem_obj == NULL)
489489
rv = siw_rx_kva(srx,
490-
(void *)(uintptr_t)(sge->laddr + frx->sge_off),
490+
ib_virt_dma_to_ptr(sge->laddr + frx->sge_off),
491491
sge_bytes);
492492
else if (!mem_p->is_pbl)
493493
rv = siw_rx_umem(srx, mem_p->umem,
@@ -852,7 +852,7 @@ int siw_proc_rresp(struct siw_qp *qp)
852852

853853
if (mem_p->mem_obj == NULL)
854854
rv = siw_rx_kva(srx,
855-
(void *)(uintptr_t)(sge->laddr + wqe->processed),
855+
ib_virt_dma_to_ptr(sge->laddr + wqe->processed),
856856
bytes);
857857
else if (!mem_p->is_pbl)
858858
rv = siw_rx_umem(srx, mem_p->umem, sge->laddr + wqe->processed,

drivers/infiniband/sw/siw/siw_qp_tx.c

Lines changed: 6 additions & 13 deletions
Original file line numberDiff line numberDiff line change
@@ -29,7 +29,7 @@ static struct page *siw_get_pblpage(struct siw_mem *mem, u64 addr, int *idx)
2929
dma_addr_t paddr = siw_pbl_get_buffer(pbl, offset, NULL, idx);
3030

3131
if (paddr)
32-
return virt_to_page((void *)(uintptr_t)paddr);
32+
return ib_virt_dma_to_page(paddr);
3333

3434
return NULL;
3535
}
@@ -56,8 +56,7 @@ static int siw_try_1seg(struct siw_iwarp_tx *c_tx, void *paddr)
5656

5757
if (!mem->mem_obj) {
5858
/* Kernel client using kva */
59-
memcpy(paddr,
60-
(const void *)(uintptr_t)sge->laddr, bytes);
59+
memcpy(paddr, ib_virt_dma_to_ptr(sge->laddr), bytes);
6160
} else if (c_tx->in_syscall) {
6261
if (copy_from_user(paddr, u64_to_user_ptr(sge->laddr),
6362
bytes))
@@ -477,7 +476,7 @@ static int siw_tx_hdt(struct siw_iwarp_tx *c_tx, struct socket *s)
477476
* or memory region with assigned kernel buffer
478477
*/
479478
iov[seg].iov_base =
480-
(void *)(uintptr_t)(sge->laddr + sge_off);
479+
ib_virt_dma_to_ptr(sge->laddr + sge_off);
481480
iov[seg].iov_len = sge_len;
482481

483482
if (do_crc)
@@ -537,19 +536,13 @@ static int siw_tx_hdt(struct siw_iwarp_tx *c_tx, struct socket *s)
537536
* Cast to an uintptr_t to preserve all 64 bits
538537
* in sge->laddr.
539538
*/
540-
uintptr_t va = (uintptr_t)(sge->laddr + sge_off);
539+
u64 va = sge->laddr + sge_off;
541540

542-
/*
543-
* virt_to_page() takes a (void *) pointer
544-
* so cast to a (void *) meaning it will be 64
545-
* bits on a 64 bit platform and 32 bits on a
546-
* 32 bit platform.
547-
*/
548-
page_array[seg] = virt_to_page((void *)(va & PAGE_MASK));
541+
page_array[seg] = ib_virt_dma_to_page(va);
549542
if (do_crc)
550543
crypto_shash_update(
551544
c_tx->mpa_crc_hd,
552-
(void *)va,
545+
ib_virt_dma_to_ptr(va),
553546
plen);
554547
}
555548

drivers/infiniband/sw/siw/siw_verbs.c

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -660,7 +660,7 @@ static int siw_copy_inline_sgl(const struct ib_send_wr *core_wr,
660660
bytes = -EINVAL;
661661
break;
662662
}
663-
memcpy(kbuf, (void *)(uintptr_t)core_sge->addr,
663+
memcpy(kbuf, ib_virt_dma_to_ptr(core_sge->addr),
664664
core_sge->length);
665665

666666
kbuf += core_sge->length;
@@ -1523,7 +1523,7 @@ int siw_map_mr_sg(struct ib_mr *base_mr, struct scatterlist *sl, int num_sle,
15231523
}
15241524
siw_dbg_mem(mem,
15251525
"sge[%d], size %u, addr 0x%p, total %lu\n",
1526-
i, pble->size, (void *)(uintptr_t)pble->addr,
1526+
i, pble->size, ib_virt_dma_to_ptr(pble->addr),
15271527
pbl_size);
15281528
}
15291529
rv = ib_sg_to_pages(base_mr, sl, num_sle, sg_off, siw_set_pbl_page);

include/rdma/ib_verbs.h

Lines changed: 25 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -4035,6 +4035,31 @@ static inline bool ib_dma_pci_p2p_dma_supported(struct ib_device *dev)
40354035
return dma_pci_p2pdma_supported(dev->dma_device);
40364036
}
40374037

4038+
/**
4039+
* ib_virt_dma_to_ptr - Convert a dma_addr to a kernel pointer
4040+
* @dma_addr: The DMA address
4041+
*
4042+
* Used by ib_uses_virt_dma() devices to get back to the kernel pointer after
4043+
* going through the dma_addr marshalling.
4044+
*/
4045+
static inline void *ib_virt_dma_to_ptr(u64 dma_addr)
4046+
{
4047+
/* virt_dma mode maps the kvs's directly into the dma addr */
4048+
return (void *)(uintptr_t)dma_addr;
4049+
}
4050+
4051+
/**
4052+
* ib_virt_dma_to_page - Convert a dma_addr to a struct page
4053+
* @dma_addr: The DMA address
4054+
*
4055+
* Used by ib_uses_virt_dma() device to get back to the struct page after going
4056+
* through the dma_addr marshalling.
4057+
*/
4058+
static inline struct page *ib_virt_dma_to_page(u64 dma_addr)
4059+
{
4060+
return virt_to_page(ib_virt_dma_to_ptr(dma_addr));
4061+
}
4062+
40384063
/**
40394064
* ib_dma_mapping_error - check a DMA addr for error
40404065
* @dev: The device for which the dma_addr was created

0 commit comments

Comments (0)