/* SPDX-License-Identifier: GPL-2.0 or BSD-3-Clause */

/* Authors: Bernard Metzler <bmt@zurich.ibm.com> */
/* Copyright (c) 2008-2019, IBM Corporation */

#ifndef _SIW_MEM_H
#define _SIW_MEM_H
struct siw_umem *siw_umem_get(u64 start, u64 len, bool writable);
void siw_umem_release(struct siw_umem *umem, bool dirty);
struct siw_pbl *siw_pbl_alloc(u32 num_buf);
dma_addr_t siw_pbl_get_buffer(struct siw_pbl *pbl, u64 off, int *len, int *idx);
struct siw_mem *siw_mem_id2obj(struct siw_device *sdev, int stag_index);
int siw_mem_add(struct siw_device *sdev, struct siw_mem *m);
int siw_invalidate_stag(struct ib_pd *pd, u32 stag);
int siw_check_mem(struct ib_pd *pd, struct siw_mem *mem, u64 addr,
		  enum ib_access_flags perms, int len);
int siw_check_sge(struct ib_pd *pd, struct siw_sge *sge,
		  struct siw_mem *mem[], enum ib_access_flags perms,
		  u32 off, int len);
void siw_wqe_put_mem(struct siw_wqe *wqe, enum siw_opcode op);
int siw_mr_add_mem(struct siw_mr *mr, struct ib_pd *pd, void *mem_obj,
		   u64 start, u64 len, int rights);
void siw_mr_drop_mem(struct siw_mr *mr);
void siw_free_mem(struct kref *ref);

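/*
 * siw_mem_put()
 *
 * Drop one reference on @mem; siw_free_mem() releases the object once
 * the last reference is gone.
 */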
static inline void siw_mem_put(struct siw_mem *mem)
{
	kref_put(&mem->ref, siw_free_mem);
}

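/*
 * siw_unref_mem_sgl()
 *
 * Drop the references held through an SGE array: put and clear up to
 * @num_sge memory objects, stopping at the first unset entry.
 */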
static inline void siw_unref_mem_sgl(struct siw_mem **mem, unsigned int num_sge)
{
	while (num_sge) {
		if (*mem == NULL)
			break;

		siw_mem_put(*mem);
		*mem = NULL;
		mem++;
		num_sge--;
	}
}

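/*
 * The page pointers of a user memory region are stored in fixed-size
 * chunks. With CHUNK_SHIFT of 9, each chunk holds 512 page pointers,
 * so one chunk covers 2 MiB of user memory and the pointer array
 * itself occupies 4 KiB on a 64-bit build with 4 KiB pages.
 */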
#define CHUNK_SHIFT 9 /* sets number of pages per chunk */
#define PAGES_PER_CHUNK (_AC(1, UL) << CHUNK_SHIFT)
#define CHUNK_MASK (~(PAGES_PER_CHUNK - 1))
#define PAGE_CHUNK_SIZE (PAGES_PER_CHUNK * sizeof(struct page *))

/*
 * siw_get_upage()
 *
 * Get page pointer for address on given umem.
 *
 * @umem: two dimensional list of page pointers
 * @addr: user virtual address
 */
static inline struct page *siw_get_upage(struct siw_umem *umem, u64 addr)
{
	unsigned int page_idx = (addr - umem->fp_addr) >> PAGE_SHIFT,
		     chunk_idx = page_idx >> CHUNK_SHIFT,
		     page_in_chunk = page_idx & ~CHUNK_MASK;

	if (likely(page_idx < umem->num_pages))
		return umem->page_chunk[chunk_idx].plist[page_in_chunk];

	return NULL;
}
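
/*
 * Illustrative sketch only, not part of the driver: one way a caller
 * could use siw_get_upage() to copy data out of a user memory region,
 * page by page. The helper name is made up for this example, and it
 * assumes <linux/highmem.h> and <linux/minmax.h> are available for
 * kmap_local_page()/kunmap_local() and min_t().
 */
static inline int siw_copy_from_umem_example(struct siw_umem *umem, u64 addr,
					     u8 *dst, unsigned int len)
{
	while (len) {
		struct page *p = siw_get_upage(umem, addr);
		unsigned int off = addr & ~PAGE_MASK;
		unsigned int bytes = min_t(unsigned int, len, PAGE_SIZE - off);
		void *kaddr;

		if (!p)
			return -EINVAL;

		/* Map the page, copy the in-page span, then advance. */
		kaddr = kmap_local_page(p);
		memcpy(dst, kaddr + off, bytes);
		kunmap_local(kaddr);

		addr += bytes;
		dst += bytes;
		len -= bytes;
	}
	return 0;
}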
#endif