/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
/*
 * Copyright (c) 2007 Cisco Systems. All rights reserved.
 * Copyright (c) 2020 Intel Corporation. All rights reserved.
 */

#ifndef IB_UMEM_H
#define IB_UMEM_H

#include <linux/list.h>
#include <linux/scatterlist.h>
#include <linux/workqueue.h>
#include <rdma/ib_verbs.h>

struct ib_ucontext;
struct ib_umem_odp;
struct dma_buf_attach_ops;

struct ib_umem {
	struct ib_device *ibdev;	/* device the memory is registered against */
	struct mm_struct *owning_mm;	/* mm of the process that created the umem */
	u64 iova;			/* device virtual address of the region */
	size_t length;			/* length of the region in bytes */
	unsigned long address;		/* userspace virtual address of the region */
	u32 writable : 1;		/* pages may be written by the device */
	u32 is_odp : 1;			/* embedded in a struct ib_umem_odp */
	u32 is_dmabuf : 1;		/* embedded in a struct ib_umem_dmabuf */
	struct sg_append_table sgt_append; /* DMA-mapped scatter/gather list of the pages */
};

struct ib_umem_dmabuf {
	struct ib_umem umem;
	struct dma_buf_attachment *attach;	/* attachment to the backing dma-buf */
	struct sg_table *sgt;			/* current mapping of the dma-buf */
	struct scatterlist *first_sg;		/* first entry overlapping the umem */
	struct scatterlist *last_sg;		/* last entry overlapping the umem */
	unsigned long first_sg_offset;		/* bytes trimmed from the first entry */
	unsigned long last_sg_trim;		/* bytes trimmed from the last entry */
	void *private;				/* driver private data */
	u8 pinned : 1;				/* dma-buf is pinned, no move_notify */
};

static inline struct ib_umem_dmabuf *to_ib_umem_dmabuf(struct ib_umem *umem)
{
	return container_of(umem, struct ib_umem_dmabuf, umem);
}

/* Returns the offset of the umem start relative to the first page. */
static inline int ib_umem_offset(struct ib_umem *umem)
{
	return umem->address & ~PAGE_MASK;
}

static inline unsigned long ib_umem_dma_offset(struct ib_umem *umem,
					       unsigned long pgsz)
{
	return (sg_dma_address(umem->sgt_append.sgt.sgl) + ib_umem_offset(umem)) &
	       (pgsz - 1);
}

static inline size_t ib_umem_num_dma_blocks(struct ib_umem *umem,
					    unsigned long pgsz)
{
	return (size_t)((ALIGN(umem->iova + umem->length, pgsz) -
			 ALIGN_DOWN(umem->iova, pgsz))) /
	       pgsz;
}

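/*
 * Worked example (illustrative values, not taken from any particular driver):
 * with iova = 0x1234, length = 0x10000 and pgsz = 0x1000,
 * ALIGN(0x11234, 0x1000) = 0x12000 and ALIGN_DOWN(0x1234, 0x1000) = 0x1000,
 * so ib_umem_num_dma_blocks() returns 0x11000 / 0x1000 = 17 blocks.
 */
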
static inline size_t ib_umem_num_pages(struct ib_umem *umem)
{
	return ib_umem_num_dma_blocks(umem, PAGE_SIZE);
}

static inline void __rdma_umem_block_iter_start(struct ib_block_iter *biter,
						struct ib_umem *umem,
						unsigned long pgsz)
{
	__rdma_block_iter_start(biter, umem->sgt_append.sgt.sgl,
				umem->sgt_append.sgt.nents, pgsz);
	biter->__sg_advance = ib_umem_offset(umem) & ~(pgsz - 1);
	biter->__sg_numblocks = ib_umem_num_dma_blocks(umem, pgsz);
}

static inline bool __rdma_umem_block_iter_next(struct ib_block_iter *biter)
{
	return __rdma_block_iter_next(biter) && biter->__sg_numblocks--;
}

/**
 * rdma_umem_for_each_dma_block - iterate over contiguous DMA blocks of the umem
 * @umem: umem to iterate over
 * @biter: block iterator holding the current position
 * @pgsz: Page size to split the list into
 *
 * pgsz must be <= PAGE_SIZE or computed by ib_umem_find_best_pgsz(). The
 * returned DMA blocks will be aligned to pgsz and span the range:
 * ALIGN_DOWN(umem->address, pgsz) to ALIGN(umem->address + umem->length, pgsz)
 *
 * Performs exactly ib_umem_num_dma_blocks() iterations.
 */
#define rdma_umem_for_each_dma_block(umem, biter, pgsz)			\
	for (__rdma_umem_block_iter_start(biter, umem, pgsz);		\
	     __rdma_umem_block_iter_next(biter);)

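/*
 * Minimal usage sketch (hypothetical driver code; the mtt_pages array and the
 * supported page-size bitmap are assumptions for illustration only):
 *
 *	struct ib_block_iter biter;
 *	unsigned long pgsz;
 *	u64 *mtt_pages;
 *	int i = 0;
 *
 *	pgsz = ib_umem_find_best_pgsz(umem, SZ_4K | SZ_2M | SZ_1G, virt);
 *	if (!pgsz)
 *		return -EINVAL;
 *	rdma_umem_for_each_dma_block(umem, &biter, pgsz)
 *		mtt_pages[i++] = rdma_block_iter_dma_address(&biter);
 */
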
#ifdef CONFIG_INFINIBAND_USER_MEM

struct ib_umem *ib_umem_get(struct ib_device *device, unsigned long addr,
			    size_t size, int access);
void ib_umem_release(struct ib_umem *umem);
int ib_umem_copy_from(void *dst, struct ib_umem *umem, size_t offset,
		      size_t length);
unsigned long ib_umem_find_best_pgsz(struct ib_umem *umem,
				     unsigned long pgsz_bitmap,
				     unsigned long virt);

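/*
 * Typical life cycle, sketched under the assumption of a driver's reg_user_mr
 * verb (ibdev, start, length and access_flags are illustrative names):
 *
 *	struct ib_umem *umem;
 *
 *	umem = ib_umem_get(ibdev, start, length, access_flags);
 *	if (IS_ERR(umem))
 *		return ERR_CAST(umem);
 *	... program the HW translation tables from umem ...
 *	ib_umem_release(umem);		(in the deregistration path)
 */
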
/**
 * ib_umem_find_best_pgoff - Find best HW page size
 *
 * @umem: umem struct
 * @pgsz_bitmap: bitmap of HW supported page sizes
 * @pgoff_bitmask: Mask of bits that can be represented with an offset
 *
 * This is very similar to ib_umem_find_best_pgsz() except instead of accepting
 * an IOVA it accepts a bitmask specifying what address bits can be represented
 * with a page offset.
 *
 * For instance if the HW has multiple page sizes, requires 64 byte alignment,
 * and can support aligned offsets up to 4032 then pgoff_bitmask would be
 * "111111000000".
 *
 * If the pgoff_bitmask requires either alignment in the low bit or an
 * unavailable page size for the high bits, this function returns 0.
 */
static inline unsigned long ib_umem_find_best_pgoff(struct ib_umem *umem,
						    unsigned long pgsz_bitmap,
						    u64 pgoff_bitmask)
{
	struct scatterlist *sg = umem->sgt_append.sgt.sgl;
	dma_addr_t dma_addr;

	dma_addr = sg_dma_address(sg) + (umem->address & ~PAGE_MASK);
	return ib_umem_find_best_pgsz(umem, pgsz_bitmap,
				      dma_addr & pgoff_bitmask);
}

struct ib_umem_dmabuf *ib_umem_dmabuf_get(struct ib_device *device,
					  unsigned long offset, size_t size,
					  int fd, int access,
					  const struct dma_buf_attach_ops *ops);
struct ib_umem_dmabuf *ib_umem_dmabuf_get_pinned(struct ib_device *device,
						 unsigned long offset,
						 size_t size, int fd,
						 int access);
int ib_umem_dmabuf_map_pages(struct ib_umem_dmabuf *umem_dmabuf);
void ib_umem_dmabuf_unmap_pages(struct ib_umem_dmabuf *umem_dmabuf);
void ib_umem_dmabuf_release(struct ib_umem_dmabuf *umem_dmabuf);

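/*
 * Sketch of pinned dma-buf registration (illustrative only; ibdev, offset,
 * size, fd and access come from the user's registration request):
 *
 *	struct ib_umem_dmabuf *umem_dmabuf;
 *
 *	umem_dmabuf = ib_umem_dmabuf_get_pinned(ibdev, offset, size, fd, access);
 *	if (IS_ERR(umem_dmabuf))
 *		return ERR_CAST(umem_dmabuf);
 *	... use &umem_dmabuf->umem like any other ib_umem ...
 *	ib_umem_dmabuf_release(umem_dmabuf);
 */
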
#else /* CONFIG_INFINIBAND_USER_MEM */

#include <linux/err.h>

static inline struct ib_umem *ib_umem_get(struct ib_device *device,
					  unsigned long addr, size_t size,
					  int access)
{
	return ERR_PTR(-EOPNOTSUPP);
}
static inline void ib_umem_release(struct ib_umem *umem) { }
static inline int ib_umem_copy_from(void *dst, struct ib_umem *umem, size_t offset,
				    size_t length) {
	return -EOPNOTSUPP;
}
static inline unsigned long ib_umem_find_best_pgsz(struct ib_umem *umem,
						   unsigned long pgsz_bitmap,
						   unsigned long virt)
{
	return 0;
}
static inline unsigned long ib_umem_find_best_pgoff(struct ib_umem *umem,
						    unsigned long pgsz_bitmap,
						    u64 pgoff_bitmask)
{
	return 0;
}
static inline
struct ib_umem_dmabuf *ib_umem_dmabuf_get(struct ib_device *device,
					  unsigned long offset,
					  size_t size, int fd,
					  int access,
					  struct dma_buf_attach_ops *ops)
{
	return ERR_PTR(-EOPNOTSUPP);
}
static inline struct ib_umem_dmabuf *
ib_umem_dmabuf_get_pinned(struct ib_device *device, unsigned long offset,
			  size_t size, int fd, int access)
{
	return ERR_PTR(-EOPNOTSUPP);
}
static inline int ib_umem_dmabuf_map_pages(struct ib_umem_dmabuf *umem_dmabuf)
{
	return -EOPNOTSUPP;
}
static inline void ib_umem_dmabuf_unmap_pages(struct ib_umem_dmabuf *umem_dmabuf) { }
static inline void ib_umem_dmabuf_release(struct ib_umem_dmabuf *umem_dmabuf) { }

#endif /* CONFIG_INFINIBAND_USER_MEM */
#endif /* IB_UMEM_H */