Linux v4.10.11
  1/*
  2 * Copyright (c) 2016 Mellanox Technologies Ltd. All rights reserved.
  3 * Copyright (c) 2015 System Fabric Works, Inc. All rights reserved.
  4 *
  5 * This software is available to you under a choice of one of two
  6 * licenses.  You may choose to be licensed under the terms of the GNU
  7 * General Public License (GPL) Version 2, available from the file
  8 * COPYING in the main directory of this source tree, or the
  9 * OpenIB.org BSD license below:
 10 *
 11 *     Redistribution and use in source and binary forms, with or
 12 *     without modification, are permitted provided that the following
 13 *     conditions are met:
 14 *
 15 *	- Redistributions of source code must retain the above
 16 *	  copyright notice, this list of conditions and the following
 17 *	  disclaimer.
 18 *
 19 *	- Redistributions in binary form must reproduce the above
 20 *	  copyright notice, this list of conditions and the following
 21 *	  disclaimer in the documentation and/or other materials
 22 *	  provided with the distribution.
 23 *
 24 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 25 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 26 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 27 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 28 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 29 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 30 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 31 * SOFTWARE.
 32 */
 33
 34#include "rxe.h"
 35#include "rxe_loc.h"
 36
 37/*
 38 * lfsr (linear feedback shift register) with period 255
 39 */
 40static u8 rxe_get_key(void)
 41{
 42	static u32 key = 1;
 43
 44	key = key << 1;
 45
 46	key |= (0 != (key & 0x100)) ^ (0 != (key & 0x10))
 47		^ (0 != (key & 0x80)) ^ (0 != (key & 0x40));
 48
 49	key &= 0xff;
 50
 51	return key;
 52}
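/*
 * rxe_get_key() above steps an 8-bit Fibonacci-style LFSR: the register is
 * shifted left, the feedback bit is the XOR of four tap bits, and the result
 * is masked back to 8 bits. Per the comment above it repeats with period 255,
 * i.e. it cycles through the non-zero byte values. The byte becomes the low
 * 8 bits of a memory key; the pool element index supplies the upper bits
 * (see rxe_mem_init() below).
 */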
 53
 54int mem_check_range(struct rxe_mem *mem, u64 iova, size_t length)
 55{
 56	switch (mem->type) {
 57	case RXE_MEM_TYPE_DMA:
 58		return 0;
 59
 60	case RXE_MEM_TYPE_MR:
 61	case RXE_MEM_TYPE_FMR:
 62		if (iova < mem->iova ||
 63		    length > mem->length ||
 64		    iova > mem->iova + mem->length - length)
 65			return -EFAULT;
 66		return 0;
 67
 68	default:
 69		return -EFAULT;
 70	}
 71}
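/*
 * Example of the range check above: for an MR registered with iova 0x10000
 * and length 0x2000, a request at iova 0x11a00 for 0x800 bytes is rejected,
 * since 0x11a00 > 0x10000 + 0x2000 - 0x800 = 0x11800 (the access would run
 * 0x200 bytes past the end of the region). DMA regions have no bounds and
 * always pass.
 */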
 72
 73#define IB_ACCESS_REMOTE	(IB_ACCESS_REMOTE_READ		\
 74				| IB_ACCESS_REMOTE_WRITE	\
 75				| IB_ACCESS_REMOTE_ATOMIC)
 76
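/*
 * rxe_mem_init() below composes the 32-bit keys: the pool element index goes
 * in the upper 24 bits and the LFSR byte in the low 8 bits. The rkey mirrors
 * the lkey only when the caller requested some remote access right; otherwise
 * it is left as 0 so the region cannot be reached by remote operations.
 */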
 77static void rxe_mem_init(int access, struct rxe_mem *mem)
 78{
 79	u32 lkey = mem->pelem.index << 8 | rxe_get_key();
 80	u32 rkey = (access & IB_ACCESS_REMOTE) ? lkey : 0;
 81
 82	if (mem->pelem.pool->type == RXE_TYPE_MR) {
 83		mem->ibmr.lkey		= lkey;
 84		mem->ibmr.rkey		= rkey;
 85	}
 86
 87	mem->lkey		= lkey;
 88	mem->rkey		= rkey;
 89	mem->state		= RXE_MEM_STATE_INVALID;
 90	mem->type		= RXE_MEM_TYPE_NONE;
 91	mem->map_shift		= ilog2(RXE_BUF_PER_MAP);
 92}
 93
 94void rxe_mem_cleanup(void *arg)
 95{
 96	struct rxe_mem *mem = arg;
 97	int i;
 98
 99	if (mem->umem)
100		ib_umem_release(mem->umem);
101
102	if (mem->map) {
103		for (i = 0; i < mem->num_map; i++)
104			kfree(mem->map[i]);
105
106		kfree(mem->map);
107	}
108}
109
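/*
 * rxe_mem_alloc() below builds a two-level page table: mem->map is an array
 * of num_map pointers, each pointing at a struct rxe_map that holds
 * RXE_BUF_PER_MAP physical-buffer entries. num_map is the round-up of
 * num_buf / RXE_BUF_PER_MAP, and RXE_BUF_PER_MAP must be a power of two
 * (the WARN_ON below) so that later lookups can use map_shift/map_mask
 * instead of divisions.
 */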
110static int rxe_mem_alloc(struct rxe_dev *rxe, struct rxe_mem *mem, int num_buf)
111{
112	int i;
113	int num_map;
114	struct rxe_map **map = mem->map;
115
116	num_map = (num_buf + RXE_BUF_PER_MAP - 1) / RXE_BUF_PER_MAP;
117
118	mem->map = kmalloc_array(num_map, sizeof(*map), GFP_KERNEL);
119	if (!mem->map)
120		goto err1;
121
122	for (i = 0; i < num_map; i++) {
123		mem->map[i] = kmalloc(sizeof(**map), GFP_KERNEL);
124		if (!mem->map[i])
125			goto err2;
126	}
127
128	WARN_ON(!is_power_of_2(RXE_BUF_PER_MAP));
129
130	mem->map_shift	= ilog2(RXE_BUF_PER_MAP);
131	mem->map_mask	= RXE_BUF_PER_MAP - 1;
132
133	mem->num_buf = num_buf;
134	mem->num_map = num_map;
135	mem->max_buf = num_map * RXE_BUF_PER_MAP;
136
137	return 0;
138
139err2:
140	for (i--; i >= 0; i--)
141		kfree(mem->map[i]);
142
143	kfree(mem->map);
144err1:
145	return -ENOMEM;
146}
147
148int rxe_mem_init_dma(struct rxe_dev *rxe, struct rxe_pd *pd,
149		     int access, struct rxe_mem *mem)
150{
151	rxe_mem_init(access, mem);
152
153	mem->pd			= pd;
154	mem->access		= access;
155	mem->state		= RXE_MEM_STATE_VALID;
156	mem->type		= RXE_MEM_TYPE_DMA;
157
158	return 0;
159}
160
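/*
 * rxe_mem_init_user() below pins the user buffer with ib_umem_get() and then
 * walks the resulting scatterlist, recording one physical buffer (kernel
 * virtual address plus the umem page size) per mapped page. When a map's
 * RXE_BUF_PER_MAP slots are full it rolls over to the next map in the table
 * allocated by rxe_mem_alloc().
 */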
161int rxe_mem_init_user(struct rxe_dev *rxe, struct rxe_pd *pd, u64 start,
162		      u64 length, u64 iova, int access, struct ib_udata *udata,
163		      struct rxe_mem *mem)
164{
165	int			entry;
166	struct rxe_map		**map;
167	struct rxe_phys_buf	*buf = NULL;
168	struct ib_umem		*umem;
169	struct scatterlist	*sg;
170	int			num_buf;
171	void			*vaddr;
172	int err;
173
174	umem = ib_umem_get(pd->ibpd.uobject->context, start, length, access, 0);
175	if (IS_ERR(umem)) {
176		pr_warn("err %d from rxe_umem_get\n",
177			(int)PTR_ERR(umem));
178		err = -EINVAL;
179		goto err1;
180	}
181
182	mem->umem = umem;
183	num_buf = umem->nmap;
184
185	rxe_mem_init(access, mem);
186
187	err = rxe_mem_alloc(rxe, mem, num_buf);
188	if (err) {
189		pr_warn("err %d from rxe_mem_alloc\n", err);
190		ib_umem_release(umem);
191		goto err1;
192	}
193
194	WARN_ON(!is_power_of_2(umem->page_size));
195
196	mem->page_shift		= ilog2(umem->page_size);
197	mem->page_mask		= umem->page_size - 1;
198
199	num_buf			= 0;
200	map			= mem->map;
201	if (length > 0) {
202		buf = map[0]->buf;
203
204		for_each_sg(umem->sg_head.sgl, sg, umem->nmap, entry) {
205			vaddr = page_address(sg_page(sg));
206			if (!vaddr) {
207				pr_warn("null vaddr\n");
208				err = -ENOMEM;
209				goto err1;
210			}
211
212			buf->addr = (uintptr_t)vaddr;
213			buf->size = umem->page_size;
214			num_buf++;
215			buf++;
216
217			if (num_buf >= RXE_BUF_PER_MAP) {
218				map++;
219				buf = map[0]->buf;
220				num_buf = 0;
221			}
222		}
223	}
224
225	mem->pd			= pd;
226	mem->umem		= umem;
227	mem->access		= access;
228	mem->length		= length;
229	mem->iova		= iova;
230	mem->va			= start;
231	mem->offset		= ib_umem_offset(umem);
232	mem->state		= RXE_MEM_STATE_VALID;
233	mem->type		= RXE_MEM_TYPE_MR;
234
235	return 0;
236
237err1:
238	return err;
239}
240
241int rxe_mem_init_fast(struct rxe_dev *rxe, struct rxe_pd *pd,
242		      int max_pages, struct rxe_mem *mem)
243{
244	int err;
245
246	rxe_mem_init(0, mem);
247
248	/* In fastreg, we also set the rkey */
249	mem->ibmr.rkey = mem->ibmr.lkey;
250
251	err = rxe_mem_alloc(rxe, mem, max_pages);
252	if (err)
253		goto err1;
254
255	mem->pd			= pd;
256	mem->max_buf		= max_pages;
257	mem->state		= RXE_MEM_STATE_FREE;
258	mem->type		= RXE_MEM_TYPE_MR;
259
260	return 0;
261
262err1:
263	return err;
264}
265
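/*
 * lookup_iova() translates an iova within the MR into (map index m, buffer
 * index n, offset within that buffer). With a uniform page size (page_shift
 * set) this is pure shift/mask arithmetic on
 *	off = iova - mem->iova + mem->offset:
 *	*offset_out = off & page_mask;
 *	n = (off >> page_shift) & map_mask;
 *	m = (off >> page_shift) >> map_shift;
 * Otherwise it falls back to walking the buffers and subtracting their sizes.
 */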
266static void lookup_iova(
267	struct rxe_mem	*mem,
268	u64			iova,
269	int			*m_out,
270	int			*n_out,
271	size_t			*offset_out)
272{
273	size_t			offset = iova - mem->iova + mem->offset;
274	int			map_index;
275	int			buf_index;
276	u64			length;
277
278	if (likely(mem->page_shift)) {
279		*offset_out = offset & mem->page_mask;
280		offset >>= mem->page_shift;
281		*n_out = offset & mem->map_mask;
282		*m_out = offset >> mem->map_shift;
283	} else {
284		map_index = 0;
285		buf_index = 0;
286
287		length = mem->map[map_index]->buf[buf_index].size;
288
289		while (offset >= length) {
290			offset -= length;
291			buf_index++;
292
293			if (buf_index == RXE_BUF_PER_MAP) {
294				map_index++;
295				buf_index = 0;
296			}
297			length = mem->map[map_index]->buf[buf_index].size;
298		}
299
300		*m_out = map_index;
301		*n_out = buf_index;
302		*offset_out = offset;
303	}
304}
305
306void *iova_to_vaddr(struct rxe_mem *mem, u64 iova, int length)
307{
308	size_t offset;
309	int m, n;
310	void *addr;
311
312	if (mem->state != RXE_MEM_STATE_VALID) {
313		pr_warn("mem not in valid state\n");
314		addr = NULL;
315		goto out;
316	}
317
318	if (!mem->map) {
319		addr = (void *)(uintptr_t)iova;
320		goto out;
321	}
322
323	if (mem_check_range(mem, iova, length)) {
324		pr_warn("range violation\n");
325		addr = NULL;
326		goto out;
327	}
328
329	lookup_iova(mem, iova, &m, &n, &offset);
330
331	if (offset + length > mem->map[m]->buf[n].size) {
332		pr_warn("crosses page boundary\n");
333		addr = NULL;
334		goto out;
335	}
336
337	addr = (void *)(uintptr_t)mem->map[m]->buf[n].addr + offset;
338
339out:
340	return addr;
341}
342
343/* copy data from a range (vaddr, vaddr+length-1) to or from
344 * a mem object starting at iova. Compute incremental value of
345 * crc32 if crcp is not zero. caller must hold a reference to mem
346 */
347int rxe_mem_copy(struct rxe_mem *mem, u64 iova, void *addr, int length,
348		 enum copy_direction dir, u32 *crcp)
349{
350	int			err;
351	int			bytes;
352	u8			*va;
353	struct rxe_map		**map;
354	struct rxe_phys_buf	*buf;
355	int			m;
356	int			i;
357	size_t			offset;
358	u32			crc = crcp ? (*crcp) : 0;
359
360	if (length == 0)
361		return 0;
362
363	if (mem->type == RXE_MEM_TYPE_DMA) {
364		u8 *src, *dest;
365
366		src  = (dir == to_mem_obj) ?
367			addr : ((void *)(uintptr_t)iova);
368
369		dest = (dir == to_mem_obj) ?
370			((void *)(uintptr_t)iova) : addr;
371
372		if (crcp)
373			*crcp = crc32_le(*crcp, src, length);
374
375		memcpy(dest, src, length);
376
377		return 0;
378	}
379
380	WARN_ON(!mem->map);
381
382	err = mem_check_range(mem, iova, length);
383	if (err) {
384		err = -EFAULT;
385		goto err1;
386	}
387
388	lookup_iova(mem, iova, &m, &i, &offset);
389
390	map	= mem->map + m;
391	buf	= map[0]->buf + i;
392
393	while (length > 0) {
394		u8 *src, *dest;
395
396		va	= (u8 *)(uintptr_t)buf->addr + offset;
397		src  = (dir == to_mem_obj) ? addr : va;
398		dest = (dir == to_mem_obj) ? va : addr;
399
400		bytes	= buf->size - offset;
401
402		if (bytes > length)
403			bytes = length;
404
405		if (crcp)
406			crc = crc32_le(crc, src, bytes);
407
408		memcpy(dest, src, bytes);
409
410		length	-= bytes;
411		addr	+= bytes;
412
413		offset	= 0;
414		buf++;
415		i++;
416
417		if (i == RXE_BUF_PER_MAP) {
418			i = 0;
419			map++;
420			buf = map[0]->buf;
421		}
422	}
423
424	if (crcp)
425		*crcp = crc;
426
427	return 0;
428
429err1:
430	return err;
431}
432
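/*
 * copy_data() below consumes the WQE's scatter/gather list: dma->cur_sge,
 * dma->sge_offset and dma->resid record how far the transfer has progressed,
 * so the function can be called repeatedly against one DMA descriptor. Each
 * SGE's lkey is resolved with lookup_mem(), which takes a reference that is
 * dropped once the SGE is exhausted or on error.
 */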
433/* copy data in or out of a wqe, i.e. sg list
434 * under the control of a dma descriptor
435 */
436int copy_data(
437	struct rxe_dev		*rxe,
438	struct rxe_pd		*pd,
439	int			access,
440	struct rxe_dma_info	*dma,
441	void			*addr,
442	int			length,
443	enum copy_direction	dir,
444	u32			*crcp)
445{
446	int			bytes;
447	struct rxe_sge		*sge	= &dma->sge[dma->cur_sge];
448	int			offset	= dma->sge_offset;
449	int			resid	= dma->resid;
450	struct rxe_mem		*mem	= NULL;
451	u64			iova;
452	int			err;
453
454	if (length == 0)
455		return 0;
456
457	if (length > resid) {
458		err = -EINVAL;
459		goto err2;
460	}
461
462	if (sge->length && (offset < sge->length)) {
463		mem = lookup_mem(pd, access, sge->lkey, lookup_local);
464		if (!mem) {
465			err = -EINVAL;
466			goto err1;
467		}
468	}
469
470	while (length > 0) {
471		bytes = length;
472
473		if (offset >= sge->length) {
474			if (mem) {
475				rxe_drop_ref(mem);
476				mem = NULL;
477			}
478			sge++;
479			dma->cur_sge++;
480			offset = 0;
481
482			if (dma->cur_sge >= dma->num_sge) {
483				err = -ENOSPC;
484				goto err2;
485			}
486
487			if (sge->length) {
488				mem = lookup_mem(pd, access, sge->lkey,
489						 lookup_local);
490				if (!mem) {
491					err = -EINVAL;
492					goto err1;
493				}
494			} else {
495				continue;
496			}
497		}
498
499		if (bytes > sge->length - offset)
500			bytes = sge->length - offset;
501
502		if (bytes > 0) {
503			iova = sge->addr + offset;
504
505			err = rxe_mem_copy(mem, iova, addr, bytes, dir, crcp);
506			if (err)
507				goto err2;
508
509			offset	+= bytes;
510			resid	-= bytes;
511			length	-= bytes;
512			addr	+= bytes;
513		}
514	}
515
516	dma->sge_offset = offset;
517	dma->resid	= resid;
518
519	if (mem)
520		rxe_drop_ref(mem);
521
522	return 0;
523
524err2:
525	if (mem)
526		rxe_drop_ref(mem);
527err1:
528	return err;
529}
530
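/*
 * advance_dma_data() below skips 'length' bytes of the same scatter/gather
 * state without copying anything: it advances cur_sge/sge_offset and shrinks
 * resid exactly as copy_data() would, returning -ENOSPC if the SGE list runs
 * out first.
 */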
531int advance_dma_data(struct rxe_dma_info *dma, unsigned int length)
532{
533	struct rxe_sge		*sge	= &dma->sge[dma->cur_sge];
534	int			offset	= dma->sge_offset;
535	int			resid	= dma->resid;
536
537	while (length) {
538		unsigned int bytes;
539
540		if (offset >= sge->length) {
541			sge++;
542			dma->cur_sge++;
543			offset = 0;
544			if (dma->cur_sge >= dma->num_sge)
545				return -ENOSPC;
546		}
547
548		bytes = length;
549
550		if (bytes > sge->length - offset)
551			bytes = sge->length - offset;
552
553		offset	+= bytes;
554		resid	-= bytes;
555		length	-= bytes;
556	}
557
558	dma->sge_offset = offset;
559	dma->resid	= resid;
560
561	return 0;
562}
563
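/*
 * Memory keys carry the pool index in their upper 24 bits, so lookup_mem()
 * below recovers the candidate MR with rxe_pool_get_index(key >> 8) and then
 * compares the full key against lkey or rkey depending on the lookup type.
 * On any mismatch the reference taken by the pool lookup is dropped again.
 */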
564/* (1) find the mem (mr or mw) corresponding to lkey/rkey
565 *     depending on lookup_type
566 * (2) verify that the (qp) pd matches the mem pd
567 * (3) verify that the mem can support the requested access
568 * (4) verify that mem state is valid
569 */
570struct rxe_mem *lookup_mem(struct rxe_pd *pd, int access, u32 key,
571			   enum lookup_type type)
572{
573	struct rxe_mem *mem;
574	struct rxe_dev *rxe = to_rdev(pd->ibpd.device);
575	int index = key >> 8;
576
577	if (index >= RXE_MIN_MR_INDEX && index <= RXE_MAX_MR_INDEX) {
578		mem = rxe_pool_get_index(&rxe->mr_pool, index);
579		if (!mem)
580			goto err1;
581	} else {
582		goto err1;
583	}
584
585	if ((type == lookup_local && mem->lkey != key) ||
586	    (type == lookup_remote && mem->rkey != key))
587		goto err2;
588
589	if (mem->pd != pd)
590		goto err2;
591
592	if (access && !(access & mem->access))
593		goto err2;
594
595	if (mem->state != RXE_MEM_STATE_VALID)
596		goto err2;
597
598	return mem;
599
600err2:
601	rxe_drop_ref(mem);
602err1:
603	return NULL;
604}
605
606int rxe_mem_map_pages(struct rxe_dev *rxe, struct rxe_mem *mem,
607		      u64 *page, int num_pages, u64 iova)
608{
609	int i;
610	int num_buf;
611	int err;
612	struct rxe_map **map;
613	struct rxe_phys_buf *buf;
614	int page_size;
615
616	if (num_pages > mem->max_buf) {
617		err = -EINVAL;
618		goto err1;
619	}
620
621	num_buf		= 0;
622	page_size	= 1 << mem->page_shift;
623	map		= mem->map;
624	buf		= map[0]->buf;
625
626	for (i = 0; i < num_pages; i++) {
627		buf->addr = *page++;
628		buf->size = page_size;
629		buf++;
630		num_buf++;
631
632		if (num_buf == RXE_BUF_PER_MAP) {
633			map++;
634			buf = map[0]->buf;
635			num_buf = 0;
636		}
637	}
638
639	mem->iova	= iova;
640	mem->va		= iova;
641	mem->length	= num_pages << mem->page_shift;
642	mem->state	= RXE_MEM_STATE_VALID;
643
644	return 0;
645
646err1:
647	return err;
648}
Linux v6.13.7
  1// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
  2/*
  3 * Copyright (c) 2016 Mellanox Technologies Ltd. All rights reserved.
  4 * Copyright (c) 2015 System Fabric Works, Inc. All rights reserved.
  5 */
  6
  7#include <linux/libnvdimm.h>
  8
  9#include "rxe.h"
 10#include "rxe_loc.h"
 11
 12/* Return a random 8 bit key value that is
 13 * different than the last_key. Set last_key to -1
 14 * if this is the first key for an MR or MW
 15 */
 16u8 rxe_get_next_key(u32 last_key)
 17{
 18	u8 key;
 19
 20	do {
 21		get_random_bytes(&key, 1);
 22	} while (key == last_key);
 23
 24	return key;
 25}
 26
 27int mr_check_range(struct rxe_mr *mr, u64 iova, size_t length)
 28{
 29	switch (mr->ibmr.type) {
 30	case IB_MR_TYPE_DMA:
 31		return 0;
 32
 33	case IB_MR_TYPE_USER:
 34	case IB_MR_TYPE_MEM_REG:
 35		if (iova < mr->ibmr.iova ||
 36		    iova + length > mr->ibmr.iova + mr->ibmr.length) {
 37			rxe_dbg_mr(mr, "iova/length out of range\n");
 38			return -EINVAL;
 39		}
 40		return 0;
 41
 42	default:
 43		rxe_dbg_mr(mr, "mr type not supported\n");
 44		return -EINVAL;
 45	}
 46}
 47
 48static void rxe_mr_init(int access, struct rxe_mr *mr)
 49{
 50	u32 key = mr->elem.index << 8 | rxe_get_next_key(-1);
 51
 52	/* set ibmr->l/rkey and also copy into private l/rkey
 53	 * for user MRs these will always be the same
 54	 * for cases where caller 'owns' the key portion
 55	 * they may be different until REG_MR WQE is executed.
 56	 */
 57	mr->lkey = mr->ibmr.lkey = key;
 58	mr->rkey = mr->ibmr.rkey = key;
 59
 60	mr->access = access;
 61	mr->ibmr.page_size = PAGE_SIZE;
 62	mr->page_mask = PAGE_MASK;
 63	mr->page_shift = PAGE_SHIFT;
 64	mr->state = RXE_MR_STATE_INVALID;
 65}
 66
 67void rxe_mr_init_dma(int access, struct rxe_mr *mr)
 68{
 69	rxe_mr_init(access, mr);
 70
 71	mr->state = RXE_MR_STATE_VALID;
 72	mr->ibmr.type = IB_MR_TYPE_DMA;
 73}
 74
 75static unsigned long rxe_mr_iova_to_index(struct rxe_mr *mr, u64 iova)
 76{
 77	return (iova >> mr->page_shift) - (mr->ibmr.iova >> mr->page_shift);
 78}
 79
 80static unsigned long rxe_mr_iova_to_page_offset(struct rxe_mr *mr, u64 iova)
 81{
 82	return iova & (mr_page_size(mr) - 1);
 83}
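/*
 * In this version the two-level map table is gone: mr->page_list is an xarray
 * of struct page pointers indexed by page number relative to ibmr.iova. The
 * two helpers above give the xarray index and the offset inside that page.
 * For example, assuming a 4 KiB page size, iova 0x3010 in an MR whose
 * ibmr.iova is 0x1000 lands at index (0x3010 >> 12) - (0x1000 >> 12) = 2 with
 * page offset 0x3010 & 0xfff = 0x10.
 */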
 84
 85static bool is_pmem_page(struct page *pg)
 86{
 87	unsigned long paddr = page_to_phys(pg);
 88
 89	return REGION_INTERSECTS ==
 90	       region_intersects(paddr, PAGE_SIZE, IORESOURCE_MEM,
 91				 IORES_DESC_PERSISTENT_MEMORY);
 92}
 93
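/*
 * rxe_mr_fill_pages_from_sgt() below copies the umem's page list into the
 * xarray. The xas_lock/xas_nomem loop is the usual XArray pattern for
 * retrying after a memory allocation, and when the MR was registered with
 * IB_ACCESS_FLUSH_PERSISTENT each page is additionally checked to be
 * persistent memory.
 */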
 94static int rxe_mr_fill_pages_from_sgt(struct rxe_mr *mr, struct sg_table *sgt)
 95{
 96	XA_STATE(xas, &mr->page_list, 0);
 97	struct sg_page_iter sg_iter;
 98	struct page *page;
 99	bool persistent = !!(mr->access & IB_ACCESS_FLUSH_PERSISTENT);
100
101	__sg_page_iter_start(&sg_iter, sgt->sgl, sgt->orig_nents, 0);
102	if (!__sg_page_iter_next(&sg_iter))
103		return 0;
104
105	do {
106		xas_lock(&xas);
107		while (true) {
108			page = sg_page_iter_page(&sg_iter);
109
110			if (persistent && !is_pmem_page(page)) {
111				rxe_dbg_mr(mr, "Page can't be persistent\n");
112				xas_set_err(&xas, -EINVAL);
113				break;
114			}
115
116			xas_store(&xas, page);
117			if (xas_error(&xas))
118				break;
119			xas_next(&xas);
120			if (!__sg_page_iter_next(&sg_iter))
121				break;
122		}
123		xas_unlock(&xas);
124	} while (xas_nomem(&xas, GFP_KERNEL));
125
126	return xas_error(&xas);
127}
128
129int rxe_mr_init_user(struct rxe_dev *rxe, u64 start, u64 length,
130		     int access, struct rxe_mr *mr)
131{
132	struct ib_umem *umem;
133	int err;
134
135	rxe_mr_init(access, mr);
136
137	xa_init(&mr->page_list);
138
139	umem = ib_umem_get(&rxe->ib_dev, start, length, access);
140	if (IS_ERR(umem)) {
141		rxe_dbg_mr(mr, "Unable to pin memory region err = %d\n",
142			(int)PTR_ERR(umem));
143		return PTR_ERR(umem);
144	}
145
146	err = rxe_mr_fill_pages_from_sgt(mr, &umem->sgt_append.sgt);
147	if (err) {
148		ib_umem_release(umem);
149		return err;
150	}
151
152	mr->umem = umem;
153	mr->ibmr.type = IB_MR_TYPE_USER;
154	mr->state = RXE_MR_STATE_VALID;
155
156	return 0;
157}
158
159static int rxe_mr_alloc(struct rxe_mr *mr, int num_buf)
160{
161	XA_STATE(xas, &mr->page_list, 0);
162	int i = 0;
163	int err;
164
165	xa_init(&mr->page_list);
166
167	do {
168		xas_lock(&xas);
169		while (i != num_buf) {
170			xas_store(&xas, XA_ZERO_ENTRY);
171			if (xas_error(&xas))
172				break;
173			xas_next(&xas);
174			i++;
175		}
176		xas_unlock(&xas);
177	} while (xas_nomem(&xas, GFP_KERNEL));
178
179	err = xas_error(&xas);
180	if (err)
181		return err;
182
183	mr->num_buf = num_buf;
184
185	return 0;
186}
187
188int rxe_mr_init_fast(int max_pages, struct rxe_mr *mr)
189{
190	int err;
191
192	/* always allow remote access for FMRs */
193	rxe_mr_init(RXE_ACCESS_REMOTE, mr);
194
195	err = rxe_mr_alloc(mr, max_pages);
196	if (err)
197		goto err1;
198
199	mr->state = RXE_MR_STATE_FREE;
200	mr->ibmr.type = IB_MR_TYPE_MEM_REG;
201
202	return 0;
203
204err1:
205	return err;
206}
207
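/*
 * rxe_set_page() below is the per-page callback that rxe_map_mr_sg() hands to
 * ib_sg_to_pages(): each call stores one page in the xarray at slot mr->nbuf,
 * failing once the number of pages reserved by rxe_mr_alloc() is exceeded.
 */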
208static int rxe_set_page(struct ib_mr *ibmr, u64 dma_addr)
209{
210	struct rxe_mr *mr = to_rmr(ibmr);
211	struct page *page = ib_virt_dma_to_page(dma_addr);
212	bool persistent = !!(mr->access & IB_ACCESS_FLUSH_PERSISTENT);
213	int err;
214
215	if (persistent && !is_pmem_page(page)) {
216		rxe_dbg_mr(mr, "Page cannot be persistent\n");
217		return -EINVAL;
218	}
219
220	if (unlikely(mr->nbuf == mr->num_buf))
221		return -ENOMEM;
222
223	err = xa_err(xa_store(&mr->page_list, mr->nbuf, page, GFP_KERNEL));
224	if (err)
225		return err;
226
227	mr->nbuf++;
228	return 0;
229}
230
231int rxe_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sgl,
232		  int sg_nents, unsigned int *sg_offset)
233{
234	struct rxe_mr *mr = to_rmr(ibmr);
235	unsigned int page_size = mr_page_size(mr);
236
237	mr->nbuf = 0;
238	mr->page_shift = ilog2(page_size);
239	mr->page_mask = ~((u64)page_size - 1);
240	mr->page_offset = mr->ibmr.iova & (page_size - 1);
241
242	return ib_sg_to_pages(ibmr, sgl, sg_nents, sg_offset, rxe_set_page);
243}
244
245static int rxe_mr_copy_xarray(struct rxe_mr *mr, u64 iova, void *addr,
246			      unsigned int length, enum rxe_mr_copy_dir dir)
247{
248	unsigned int page_offset = rxe_mr_iova_to_page_offset(mr, iova);
249	unsigned long index = rxe_mr_iova_to_index(mr, iova);
250	unsigned int bytes;
251	struct page *page;
252	void *va;
253
254	while (length) {
255		page = xa_load(&mr->page_list, index);
256		if (!page)
257			return -EFAULT;
258
259		bytes = min_t(unsigned int, length,
260				mr_page_size(mr) - page_offset);
261		va = kmap_local_page(page);
262		if (dir == RXE_FROM_MR_OBJ)
263			memcpy(addr, va + page_offset, bytes);
264		else
265			memcpy(va + page_offset, addr, bytes);
266		kunmap_local(va);
267
268		page_offset = 0;
269		addr += bytes;
270		length -= bytes;
271		index++;
272	}
273
274	return 0;
275}
276
277static void rxe_mr_copy_dma(struct rxe_mr *mr, u64 dma_addr, void *addr,
278			    unsigned int length, enum rxe_mr_copy_dir dir)
279{
280	unsigned int page_offset = dma_addr & (PAGE_SIZE - 1);
281	unsigned int bytes;
282	struct page *page;
283	u8 *va;
284
285	while (length) {
286		page = ib_virt_dma_to_page(dma_addr);
287		bytes = min_t(unsigned int, length,
288				PAGE_SIZE - page_offset);
289		va = kmap_local_page(page);
290
291		if (dir == RXE_TO_MR_OBJ)
292			memcpy(va + page_offset, addr, bytes);
293		else
294			memcpy(addr, va + page_offset, bytes);
295
296		kunmap_local(va);
297		page_offset = 0;
298		dma_addr += bytes;
299		addr += bytes;
300		length -= bytes;
301	}
302}
303
304int rxe_mr_copy(struct rxe_mr *mr, u64 iova, void *addr,
305		unsigned int length, enum rxe_mr_copy_dir dir)
306{
307	int err;
308
309	if (length == 0)
310		return 0;
311
312	if (WARN_ON(!mr))
313		return -EINVAL;
314
315	if (mr->ibmr.type == IB_MR_TYPE_DMA) {
316		rxe_mr_copy_dma(mr, iova, addr, length, dir);
317		return 0;
318	}
319
320	err = mr_check_range(mr, iova, length);
321	if (unlikely(err)) {
322		rxe_dbg_mr(mr, "iova out of range\n");
323		return err;
324	}
325
326	return rxe_mr_copy_xarray(mr, iova, addr, length, dir);
327}
328
329/* copy data in or out of a wqe, i.e. sg list
330 * under the control of a dma descriptor
331 */
332int copy_data(
333	struct rxe_pd		*pd,
334	int			access,
335	struct rxe_dma_info	*dma,
336	void			*addr,
337	int			length,
338	enum rxe_mr_copy_dir	dir)
339{
340	int			bytes;
341	struct rxe_sge		*sge	= &dma->sge[dma->cur_sge];
342	int			offset	= dma->sge_offset;
343	int			resid	= dma->resid;
344	struct rxe_mr		*mr	= NULL;
345	u64			iova;
346	int			err;
347
348	if (length == 0)
349		return 0;
350
351	if (length > resid) {
352		err = -EINVAL;
353		goto err2;
354	}
355
356	if (sge->length && (offset < sge->length)) {
357		mr = lookup_mr(pd, access, sge->lkey, RXE_LOOKUP_LOCAL);
358		if (!mr) {
359			err = -EINVAL;
360			goto err1;
361		}
362	}
363
364	while (length > 0) {
365		bytes = length;
366
367		if (offset >= sge->length) {
368			if (mr) {
369				rxe_put(mr);
370				mr = NULL;
371			}
372			sge++;
373			dma->cur_sge++;
374			offset = 0;
375
376			if (dma->cur_sge >= dma->num_sge) {
377				err = -ENOSPC;
378				goto err2;
379			}
380
381			if (sge->length) {
382				mr = lookup_mr(pd, access, sge->lkey,
383					       RXE_LOOKUP_LOCAL);
384				if (!mr) {
385					err = -EINVAL;
386					goto err1;
387				}
388			} else {
389				continue;
390			}
391		}
392
393		if (bytes > sge->length - offset)
394			bytes = sge->length - offset;
395
396		if (bytes > 0) {
397			iova = sge->addr + offset;
398			err = rxe_mr_copy(mr, iova, addr, bytes, dir);
399			if (err)
400				goto err2;
401
402			offset	+= bytes;
403			resid	-= bytes;
404			length	-= bytes;
405			addr	+= bytes;
406		}
407	}
408
409	dma->sge_offset = offset;
410	dma->resid	= resid;
411
412	if (mr)
413		rxe_put(mr);
414
415	return 0;
416
417err2:
418	if (mr)
419		rxe_put(mr);
420err1:
421	return err;
422}
423
424int rxe_flush_pmem_iova(struct rxe_mr *mr, u64 iova, unsigned int length)
425{
426	unsigned int page_offset;
427	unsigned long index;
428	struct page *page;
429	unsigned int bytes;
430	int err;
431	u8 *va;
432
433	/* mr must be valid even if length is zero */
434	if (WARN_ON(!mr))
435		return -EINVAL;
436
437	if (length == 0)
438		return 0;
439
440	if (mr->ibmr.type == IB_MR_TYPE_DMA)
441		return -EFAULT;
442
443	err = mr_check_range(mr, iova, length);
444	if (err)
445		return err;
446
447	while (length > 0) {
448		index = rxe_mr_iova_to_index(mr, iova);
449		page = xa_load(&mr->page_list, index);
450		page_offset = rxe_mr_iova_to_page_offset(mr, iova);
451		if (!page)
452			return -EFAULT;
453		bytes = min_t(unsigned int, length,
454				mr_page_size(mr) - page_offset);
455
456		va = kmap_local_page(page);
457		arch_wb_cache_pmem(va + page_offset, bytes);
458		kunmap_local(va);
459
460		length -= bytes;
461		iova += bytes;
462		page_offset = 0;
463	}
464
465	return 0;
466}
467
468/* Guarantee atomicity of atomic operations at the machine level. */
469static DEFINE_SPINLOCK(atomic_ops_lock);
470
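/*
 * rxe_mr_do_atomic_op() below implements 8-byte compare&swap and fetch&add on
 * MR memory. The target page is mapped with kmap_local_page() and the
 * read-modify-write is serialized by the single atomic_ops_lock above; the
 * target address must be 8-byte aligned or the responder reports a misaligned
 * atomic error.
 */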
471int rxe_mr_do_atomic_op(struct rxe_mr *mr, u64 iova, int opcode,
472			u64 compare, u64 swap_add, u64 *orig_val)
473{
474	unsigned int page_offset;
475	struct page *page;
476	u64 value;
477	u64 *va;
478
479	if (unlikely(mr->state != RXE_MR_STATE_VALID)) {
480		rxe_dbg_mr(mr, "mr not in valid state\n");
481		return RESPST_ERR_RKEY_VIOLATION;
482	}
483
484	if (mr->ibmr.type == IB_MR_TYPE_DMA) {
485		page_offset = iova & (PAGE_SIZE - 1);
486		page = ib_virt_dma_to_page(iova);
487	} else {
488		unsigned long index;
489		int err;
490
491		err = mr_check_range(mr, iova, sizeof(value));
492		if (err) {
493			rxe_dbg_mr(mr, "iova out of range\n");
494			return RESPST_ERR_RKEY_VIOLATION;
495		}
496		page_offset = rxe_mr_iova_to_page_offset(mr, iova);
497		index = rxe_mr_iova_to_index(mr, iova);
498		page = xa_load(&mr->page_list, index);
499		if (!page)
500			return RESPST_ERR_RKEY_VIOLATION;
501	}
502
503	if (unlikely(page_offset & 0x7)) {
504		rxe_dbg_mr(mr, "iova not aligned\n");
505		return RESPST_ERR_MISALIGNED_ATOMIC;
506	}
507
508	va = kmap_local_page(page);
509
510	spin_lock_bh(&atomic_ops_lock);
511	value = *orig_val = va[page_offset >> 3];
512
513	if (opcode == IB_OPCODE_RC_COMPARE_SWAP) {
514		if (value == compare)
515			va[page_offset >> 3] = swap_add;
516	} else {
517		value += swap_add;
518		va[page_offset >> 3] = value;
519	}
520	spin_unlock_bh(&atomic_ops_lock);
521
522	kunmap_local(va);
523
524	return 0;
525}
526
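/*
 * The atomic-write handler below performs a single release-ordered 64-bit
 * store (smp_store_release) to the target page. It is only built for 64-bit
 * configurations; 32-bit builds return an unsupported-opcode response.
 */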
527#if defined CONFIG_64BIT
528/* only implemented or called for 64 bit architectures */
529int rxe_mr_do_atomic_write(struct rxe_mr *mr, u64 iova, u64 value)
530{
531	unsigned int page_offset;
532	struct page *page;
533	u64 *va;
534
535	/* See IBA oA19-28 */
536	if (unlikely(mr->state != RXE_MR_STATE_VALID)) {
537		rxe_dbg_mr(mr, "mr not in valid state\n");
538		return RESPST_ERR_RKEY_VIOLATION;
539	}
540
541	if (mr->ibmr.type == IB_MR_TYPE_DMA) {
542		page_offset = iova & (PAGE_SIZE - 1);
543		page = ib_virt_dma_to_page(iova);
544	} else {
545		unsigned long index;
546		int err;
547
548		/* See IBA oA19-28 */
549		err = mr_check_range(mr, iova, sizeof(value));
550		if (unlikely(err)) {
551			rxe_dbg_mr(mr, "iova out of range\n");
552			return RESPST_ERR_RKEY_VIOLATION;
553		}
554		page_offset = rxe_mr_iova_to_page_offset(mr, iova);
555		index = rxe_mr_iova_to_index(mr, iova);
556		page = xa_load(&mr->page_list, index);
557		if (!page)
558			return RESPST_ERR_RKEY_VIOLATION;
559	}
560
561	/* See IBA A19.4.2 */
562	if (unlikely(page_offset & 0x7)) {
563		rxe_dbg_mr(mr, "misaligned address\n");
564		return RESPST_ERR_MISALIGNED_ATOMIC;
565	}
566
567	va = kmap_local_page(page);
568
569	/* Do atomic write after all prior operations have completed */
570	smp_store_release(&va[page_offset >> 3], value);
571
572	kunmap_local(va);
573
574	return 0;
575}
576#else
577int rxe_mr_do_atomic_write(struct rxe_mr *mr, u64 iova, u64 value)
578{
579	return RESPST_ERR_UNSUPPORTED_OPCODE;
580}
581#endif
582
583int advance_dma_data(struct rxe_dma_info *dma, unsigned int length)
584{
585	struct rxe_sge		*sge	= &dma->sge[dma->cur_sge];
586	int			offset	= dma->sge_offset;
587	int			resid	= dma->resid;
588
589	while (length) {
590		unsigned int bytes;
591
592		if (offset >= sge->length) {
593			sge++;
594			dma->cur_sge++;
595			offset = 0;
596			if (dma->cur_sge >= dma->num_sge)
597				return -ENOSPC;
598		}
599
600		bytes = length;
601
602		if (bytes > sge->length - offset)
603			bytes = sge->length - offset;
604
605		offset	+= bytes;
606		resid	-= bytes;
607		length	-= bytes;
608	}
609
610	dma->sge_offset = offset;
611	dma->resid	= resid;
612
613	return 0;
614}
615
616struct rxe_mr *lookup_mr(struct rxe_pd *pd, int access, u32 key,
617			 enum rxe_mr_lookup_type type)
618{
619	struct rxe_mr *mr;
620	struct rxe_dev *rxe = to_rdev(pd->ibpd.device);
621	int index = key >> 8;
622
623	mr = rxe_pool_get_index(&rxe->mr_pool, index);
624	if (!mr)
625		return NULL;
626
627	if (unlikely((type == RXE_LOOKUP_LOCAL && mr->lkey != key) ||
628		     (type == RXE_LOOKUP_REMOTE && mr->rkey != key) ||
629		     mr_pd(mr) != pd || ((access & mr->access) != access) ||
630		     mr->state != RXE_MR_STATE_VALID)) {
631		rxe_put(mr);
632		mr = NULL;
633	}
634
635	return mr;
636}
637
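/*
 * rxe_invalidate_mr() below handles local/remote invalidate: the MR is found
 * by the index part of the key, the full key is checked against rkey or lkey
 * depending on whether the MR allows remote access, and the invalidate is
 * refused while memory windows are still bound or for MRs that are not
 * fast-registration (IB_MR_TYPE_MEM_REG) MRs. On success the MR moves to the
 * FREE state so it can be re-registered.
 */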
638int rxe_invalidate_mr(struct rxe_qp *qp, u32 key)
639{
640	struct rxe_dev *rxe = to_rdev(qp->ibqp.device);
641	struct rxe_mr *mr;
642	int remote;
643	int ret;
644
645	mr = rxe_pool_get_index(&rxe->mr_pool, key >> 8);
646	if (!mr) {
647		rxe_dbg_qp(qp, "No MR for key %#x\n", key);
648		ret = -EINVAL;
649		goto err;
650	}
651
652	remote = mr->access & RXE_ACCESS_REMOTE;
653	if (remote ? (key != mr->rkey) : (key != mr->lkey)) {
654		rxe_dbg_mr(mr, "wr key (%#x) doesn't match mr key (%#x)\n",
655			key, (remote ? mr->rkey : mr->lkey));
656		ret = -EINVAL;
657		goto err_drop_ref;
658	}
659
660	if (atomic_read(&mr->num_mw) > 0) {
661		rxe_dbg_mr(mr, "Attempt to invalidate an MR while bound to MWs\n");
662		ret = -EINVAL;
663		goto err_drop_ref;
664	}
665
666	if (unlikely(mr->ibmr.type != IB_MR_TYPE_MEM_REG)) {
667		rxe_dbg_mr(mr, "Type (%d) is wrong\n", mr->ibmr.type);
668		ret = -EINVAL;
669		goto err_drop_ref;
670	}
671
672	mr->state = RXE_MR_STATE_FREE;
673	ret = 0;
674
675err_drop_ref:
676	rxe_put(mr);
677err:
678	return ret;
679}
680
681/* user can (re)register fast MR by executing a REG_MR WQE.
682 * user is expected to hold a reference on the ib mr until the
683 * WQE completes.
684 * Once a fast MR is created this is the only way to change the
685 * private keys. It is the responsibility of the user to maintain
686 * the ib mr keys in sync with rxe mr keys.
687 */
688int rxe_reg_fast_mr(struct rxe_qp *qp, struct rxe_send_wqe *wqe)
689{
690	struct rxe_mr *mr = to_rmr(wqe->wr.wr.reg.mr);
691	u32 key = wqe->wr.wr.reg.key;
692	u32 access = wqe->wr.wr.reg.access;
693
694	/* user can only register MR in free state */
695	if (unlikely(mr->state != RXE_MR_STATE_FREE)) {
696		rxe_dbg_mr(mr, "mr->lkey = 0x%x not free\n", mr->lkey);
697		return -EINVAL;
698	}
699
700	/* user can only register mr with qp in same protection domain */
701	if (unlikely(qp->ibqp.pd != mr->ibmr.pd)) {
702		rxe_dbg_mr(mr, "qp->pd and mr->pd don't match\n");
703		return -EINVAL;
704	}
705
706	/* user is only allowed to change key portion of l/rkey */
707	if (unlikely((mr->lkey & ~0xff) != (key & ~0xff))) {
708		rxe_dbg_mr(mr, "key = 0x%x has wrong index mr->lkey = 0x%x\n",
709			key, mr->lkey);
710		return -EINVAL;
711	}
712
713	mr->access = access;
714	mr->lkey = key;
715	mr->rkey = key;
716	mr->ibmr.iova = wqe->wr.wr.reg.mr->iova;
717	mr->state = RXE_MR_STATE_VALID;
718
719	return 0;
720}
721
722void rxe_mr_cleanup(struct rxe_pool_elem *elem)
723{
724	struct rxe_mr *mr = container_of(elem, typeof(*mr), elem);
725
726	rxe_put(mr_pd(mr));
727	ib_umem_release(mr->umem);
728
729	if (mr->ibmr.type != IB_MR_TYPE_DMA)
730		xa_destroy(&mr->page_list);
731}