// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/* Copyright (c) 2020 Mellanox Technologies Ltd. */

#include <linux/vdpa.h>
#include <linux/gcd.h>
#include <linux/string.h>
#include <linux/mlx5/qp.h>
#include "mlx5_vdpa.h"

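/* Overview: guest memory is exposed to the device in two layers. Each
 * contiguous, uniformly-permissioned range of the iotlb is registered as a
 * direct mkey in MTT mode; an indirect mkey in KLM mode then stitches the
 * direct mkeys (and null-mkey fillers for holes) into one address space.
 */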
/* DIV_ROUND_UP where the divisor is a power of 2, given by its log base 2 value */
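/* e.g. MLX5_DIV_ROUND_UP_POW2(5000, 12) == DIV_ROUND_UP(5000, 4096) == 2 */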
#define MLX5_DIV_ROUND_UP_POW2(_n, _s) \
({ \
	u64 __s = _s; \
	u64 _res; \
	_res = (((_n) + (1 << (__s)) - 1) >> (__s)); \
	_res; \
})

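/* MTT entries are 8 bytes each and the device counts them in 16-byte
 * octwords, i.e. two MTT entries per octword; hence the division by 2 with
 * rounding up.
 */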
static int get_octo_len(u64 len, int page_shift)
{
	u64 page_size = 1ULL << page_shift;
	int npages;

	npages = ALIGN(len, page_size) >> page_shift;
	return (npages + 1) / 2;
}

static void fill_sg(struct mlx5_vdpa_direct_mr *mr, void *in)
{
	struct scatterlist *sg;
	__be64 *pas;
	int i;

	pas = MLX5_ADDR_OF(create_mkey_in, in, klm_pas_mtt);
	for_each_sg(mr->sg_head.sgl, sg, mr->nsg, i)
		*pas++ = cpu_to_be64(sg_dma_address(sg));
}

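/* The mkey access mode is split across two fields: bits 1:0 go in
 * access_mode_1_0 and bits 4:2 go in access_mode_4_2.
 */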
static void mlx5_set_access_mode(void *mkc, int mode)
{
	MLX5_SET(mkc, mkc, access_mode_1_0, mode & 0x3);
	MLX5_SET(mkc, mkc, access_mode_4_2, mode >> 2);
}

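/* Write the DMA address of each scatterlist entry into the MTT array in
 * big-endian device format. Note this assumes the DMA layer mapped exactly
 * mr->nsg equal-sized entries and did not coalesce adjacent ones.
 */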
static void populate_mtts(struct mlx5_vdpa_direct_mr *mr, __be64 *mtt)
{
	struct scatterlist *sg;
	int i;

	for_each_sg(mr->sg_head.sgl, sg, mr->nsg, i)
		mtt[i] = cpu_to_be64(sg_dma_address(sg));
}

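/* Build and execute a CREATE_MKEY command for one direct region: an MTT-mode
 * mkey of length mr->end - mr->start, starting at mr->offset, with
 * permissions taken from mr->perm.
 */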
static int create_direct_mr(struct mlx5_vdpa_dev *mvdev, struct mlx5_vdpa_direct_mr *mr)
{
	int inlen;
	void *mkc;
	void *in;
	int err;

	inlen = MLX5_ST_SZ_BYTES(create_mkey_in) + roundup(MLX5_ST_SZ_BYTES(mtt) * mr->nsg, 16);
	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	MLX5_SET(create_mkey_in, in, uid, mvdev->res.uid);
	fill_sg(mr, in);
	mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);
	MLX5_SET(mkc, mkc, lw, !!(mr->perm & VHOST_MAP_WO));
	MLX5_SET(mkc, mkc, lr, !!(mr->perm & VHOST_MAP_RO));
	mlx5_set_access_mode(mkc, MLX5_MKC_ACCESS_MODE_MTT);
	MLX5_SET(mkc, mkc, qpn, 0xffffff);
	MLX5_SET(mkc, mkc, pd, mvdev->res.pdn);
	MLX5_SET64(mkc, mkc, start_addr, mr->offset);
	MLX5_SET64(mkc, mkc, len, mr->end - mr->start);
	MLX5_SET(mkc, mkc, log_page_size, mr->log_size);
	MLX5_SET(mkc, mkc, translations_octword_size,
		 get_octo_len(mr->end - mr->start, mr->log_size));
	MLX5_SET(create_mkey_in, in, translations_octword_actual_size,
		 get_octo_len(mr->end - mr->start, mr->log_size));
	populate_mtts(mr, MLX5_ADDR_OF(create_mkey_in, in, klm_pas_mtt));
	err = mlx5_vdpa_create_mkey(mvdev, &mr->mr, in, inlen);
	kvfree(in);
	if (err) {
		mlx5_vdpa_warn(mvdev, "Failed to create direct MR\n");
		return err;
	}

	return 0;
}

static void destroy_direct_mr(struct mlx5_vdpa_dev *mvdev, struct mlx5_vdpa_direct_mr *mr)
{
	mlx5_vdpa_destroy_mkey(mvdev, &mr->mr);
}

static u64 map_start(struct vhost_iotlb_map *map, struct mlx5_vdpa_direct_mr *mr)
{
	return max_t(u64, map->start, mr->start);
}

static u64 map_end(struct vhost_iotlb_map *map, struct mlx5_vdpa_direct_mr *mr)
{
	return min_t(u64, map->last + 1, mr->end);
}

static u64 maplen(struct vhost_iotlb_map *map, struct mlx5_vdpa_direct_mr *mr)
{
	return map_end(map, mr) - map_start(map, mr);
}

#define MLX5_VDPA_INVALID_START_ADDR ((u64)-1)
#define MLX5_VDPA_INVALID_LEN ((u64)-1)

static u64 indir_start_addr(struct mlx5_vdpa_mr *mkey)
{
	struct mlx5_vdpa_direct_mr *s;

	s = list_first_entry_or_null(&mkey->head, struct mlx5_vdpa_direct_mr, list);
	if (!s)
		return MLX5_VDPA_INVALID_START_ADDR;

	return s->start;
}

static u64 indir_len(struct mlx5_vdpa_mr *mkey)
{
	struct mlx5_vdpa_direct_mr *s;
	struct mlx5_vdpa_direct_mr *e;

	s = list_first_entry_or_null(&mkey->head, struct mlx5_vdpa_direct_mr, list);
	if (!s)
		return MLX5_VDPA_INVALID_LEN;

	e = list_last_entry(&mkey->head, struct mlx5_vdpa_direct_mr, list);

	return e->end - s->start;
}

#define LOG_MAX_KLM_SIZE 30
#define MAX_KLM_SIZE BIT(LOG_MAX_KLM_SIZE)

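/* Each direct MR is capped at MAX_KLM_SIZE (1 GB) so its length fits in the
 * 32-bit KLM byte_count field; larger ranges are split in add_direct_chain().
 */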
static u32 klm_bcount(u64 size)
{
	return (u32)size;
}

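/* Fill the KLM array of the indirect mkey: each direct MR contributes one
 * entry pointing at its mkey, and any hole between consecutive direct MRs is
 * covered by an entry pointing at the device's null mkey, so the indirect
 * key spans the whole range.
 */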
static void fill_indir(struct mlx5_vdpa_dev *mvdev, struct mlx5_vdpa_mr *mkey, void *in)
{
	struct mlx5_vdpa_direct_mr *dmr;
	struct mlx5_klm *klmarr;
	struct mlx5_klm *klm;
	bool first = true;
	u64 preve;
	int i;

	klmarr = MLX5_ADDR_OF(create_mkey_in, in, klm_pas_mtt);
	i = 0;
	list_for_each_entry(dmr, &mkey->head, list) {
again:
		klm = &klmarr[i++];
		if (first) {
			preve = dmr->start;
			first = false;
		}

		if (preve == dmr->start) {
			klm->key = cpu_to_be32(dmr->mr.key);
			klm->bcount = cpu_to_be32(klm_bcount(dmr->end - dmr->start));
			preve = dmr->end;
		} else {
			klm->key = cpu_to_be32(mvdev->res.null_mkey);
			klm->bcount = cpu_to_be32(klm_bcount(dmr->start - preve));
			preve = dmr->start;
			goto again;
		}
	}
}

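/* Each KLM entry is 16 bytes; the entry count is padded to a multiple of
 * four (64 bytes), as required for the translation array.
 */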
static int klm_byte_size(int nklms)
{
	return 16 * ALIGN(nklms, 4);
}

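/* Create the indirect mkey: a KLM-mode mkey whose entries chain together all
 * direct mkeys (plus null-mkey fillers) to cover the span from the first
 * direct MR's start to the last one's end.
 */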
static int create_indirect_key(struct mlx5_vdpa_dev *mvdev, struct mlx5_vdpa_mr *mr)
{
	int inlen;
	void *mkc;
	void *in;
	int err;
	u64 start;
	u64 len;

	start = indir_start_addr(mr);
	len = indir_len(mr);
	if (start == MLX5_VDPA_INVALID_START_ADDR || len == MLX5_VDPA_INVALID_LEN)
		return -EINVAL;

	inlen = MLX5_ST_SZ_BYTES(create_mkey_in) + klm_byte_size(mr->num_klms);
	in = kzalloc(inlen, GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	MLX5_SET(create_mkey_in, in, uid, mvdev->res.uid);
	mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);
	MLX5_SET(mkc, mkc, lw, 1);
	MLX5_SET(mkc, mkc, lr, 1);
	mlx5_set_access_mode(mkc, MLX5_MKC_ACCESS_MODE_KLMS);
	MLX5_SET(mkc, mkc, qpn, 0xffffff);
	MLX5_SET(mkc, mkc, pd, mvdev->res.pdn);
	MLX5_SET64(mkc, mkc, start_addr, start);
	MLX5_SET64(mkc, mkc, len, len);
	MLX5_SET(mkc, mkc, translations_octword_size, klm_byte_size(mr->num_klms) / 16);
	MLX5_SET(create_mkey_in, in, translations_octword_actual_size, mr->num_klms);
	fill_indir(mvdev, mr, in);
	err = mlx5_vdpa_create_mkey(mvdev, &mr->mkey, in, inlen);
	kfree(in);
	return err;
}

static void destroy_indirect_key(struct mlx5_vdpa_dev *mvdev, struct mlx5_vdpa_mr *mkey)
{
	mlx5_vdpa_destroy_mkey(mvdev, &mkey->mkey);
}

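/* Map one direct MR: pick the mapping granularity from the GCD of the
 * lengths of all iotlb fragments overlapping the range (rounded down to a
 * power of two), build a scatterlist at that granularity, DMA-map it, and
 * create the direct mkey.
 */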
static int map_direct_mr(struct mlx5_vdpa_dev *mvdev, struct mlx5_vdpa_direct_mr *mr,
			 struct vhost_iotlb *iotlb)
{
	struct vhost_iotlb_map *map;
	unsigned long lgcd = 0;
	int log_entity_size;
	unsigned long size;
	int err;
	struct page *pg;
	unsigned int nsg;
	int sglen;
	u64 pa;
	u64 paend;
	struct scatterlist *sg;
	struct device *dma = mvdev->mdev->device;
	int ret;

	for (map = vhost_iotlb_itree_first(iotlb, mr->start, mr->end - 1);
	     map; map = vhost_iotlb_itree_next(map, mr->start, mr->end - 1)) {
		size = maplen(map, mr);
		lgcd = gcd(lgcd, size);
	}
	log_entity_size = ilog2(lgcd);

	sglen = 1 << log_entity_size;
	nsg = MLX5_DIV_ROUND_UP_POW2(mr->end - mr->start, log_entity_size);

	err = sg_alloc_table(&mr->sg_head, nsg, GFP_KERNEL);
	if (err)
		return err;

	sg = mr->sg_head.sgl;
	for (map = vhost_iotlb_itree_first(iotlb, mr->start, mr->end - 1);
	     map; map = vhost_iotlb_itree_next(map, mr->start, mr->end - 1)) {
		paend = map->addr + maplen(map, mr);
		for (pa = map->addr; pa < paend; pa += sglen) {
			pg = pfn_to_page(__phys_to_pfn(pa));
			if (!sg) {
				mlx5_vdpa_warn(mvdev, "sg null. start 0x%llx, end 0x%llx\n",
					       map->start, map->last + 1);
				err = -ENOMEM;
				goto err_map;
			}
			sg_set_page(sg, pg, sglen, 0);
			sg = sg_next(sg);
			if (!sg)
				goto done;
		}
	}
done:
	mr->log_size = log_entity_size;
	mr->nsg = nsg;
	ret = dma_map_sg_attrs(dma, mr->sg_head.sgl, mr->nsg, DMA_BIDIRECTIONAL, 0);
	if (!ret) {
		/* dma_map_sg_attrs() returns 0 on failure; don't report success */
		err = -ENOMEM;
		goto err_map;
	}

	err = create_direct_mr(mvdev, mr);
	if (err)
		goto err_direct;

	return 0;

err_direct:
	dma_unmap_sg_attrs(dma, mr->sg_head.sgl, mr->nsg, DMA_BIDIRECTIONAL, 0);
err_map:
	sg_free_table(&mr->sg_head);
	return err;
}

static void unmap_direct_mr(struct mlx5_vdpa_dev *mvdev, struct mlx5_vdpa_direct_mr *mr)
{
	struct device *dma = mvdev->mdev->device;

	destroy_direct_mr(mvdev, mr);
	dma_unmap_sg_attrs(dma, mr->sg_head.sgl, mr->nsg, DMA_BIDIRECTIONAL, 0);
	sg_free_table(&mr->sg_head);
}

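/* Register one contiguous, uniformly-permissioned range as a chain of direct
 * MRs, splitting it into chunks of at most MAX_KLM_SIZE so each chunk fits
 * in a single KLM entry of the indirect mkey.
 */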
static int add_direct_chain(struct mlx5_vdpa_dev *mvdev, u64 start, u64 size, u8 perm,
			    struct vhost_iotlb *iotlb)
{
	struct mlx5_vdpa_mr *mr = &mvdev->mr;
	struct mlx5_vdpa_direct_mr *dmr;
	struct mlx5_vdpa_direct_mr *n;
	LIST_HEAD(tmp);
	u64 st;
	u64 sz;
	int err;

	st = start;
	while (size) {
		sz = (u32)min_t(u64, MAX_KLM_SIZE, size);
		dmr = kzalloc(sizeof(*dmr), GFP_KERNEL);
		if (!dmr) {
			err = -ENOMEM;
			goto err_alloc;
		}

		dmr->start = st;
		dmr->end = st + sz;
		dmr->perm = perm;
		err = map_direct_mr(mvdev, dmr, iotlb);
		if (err) {
			kfree(dmr);
			goto err_alloc;
		}

		list_add_tail(&dmr->list, &tmp);
		size -= sz;
		mr->num_directs++;
		mr->num_klms++;
		st += sz;
	}
	list_splice_tail(&tmp, &mr->head);
	return 0;

err_alloc:
	/* Unwind only the MRs created by this call; the caller cleans mr->head */
	list_for_each_entry_safe(dmr, n, &tmp, list) {
		list_del_init(&dmr->list);
		unmap_direct_mr(mvdev, dmr);
		kfree(dmr);
	}
	return err;
}

/* The iotlb pointer contains a list of maps. Go over the maps, possibly
 * merging mergeable maps, and create direct memory keys that provide the
 * device access to memory. The direct mkeys are then referred to by the
 * indirect memory key that provides access to the entire address space given
 * by iotlb.
 */
static int _mlx5_vdpa_create_mr(struct mlx5_vdpa_dev *mvdev, struct vhost_iotlb *iotlb)
{
	struct mlx5_vdpa_mr *mr = &mvdev->mr;
	struct mlx5_vdpa_direct_mr *dmr;
	struct mlx5_vdpa_direct_mr *n;
	struct vhost_iotlb_map *map;
	u32 pperm = U16_MAX;
	u64 last = U64_MAX;
	u64 ps = U64_MAX;
	u64 pe = U64_MAX;
	u64 start = 0;
	int err = 0;
	int nnuls;

	if (mr->initialized)
		return 0;

	INIT_LIST_HEAD(&mr->head);
	for (map = vhost_iotlb_itree_first(iotlb, start, last); map;
	     map = vhost_iotlb_itree_next(map, start, last)) {
		start = map->start;
		if (pe == map->start && pperm == map->perm) {
			/* This map extends the previous one: merge them */
			pe = map->last + 1;
		} else {
			if (ps != U64_MAX) {
				if (pe < map->start) {
					/* We have a hole in the map. Check how
					 * many null keys are required to fill it.
					 */
					nnuls = MLX5_DIV_ROUND_UP_POW2(map->start - pe,
								       LOG_MAX_KLM_SIZE);
					mr->num_klms += nnuls;
				}
				err = add_direct_chain(mvdev, ps, pe - ps, pperm, iotlb);
				if (err)
					goto err_chain;
			}
			ps = map->start;
			pe = map->last + 1;
			pperm = map->perm;
		}
	}
	/* Flush the last accumulated range */
	err = add_direct_chain(mvdev, ps, pe - ps, pperm, iotlb);
	if (err)
		goto err_chain;

	/* Create the memory key that defines the guest's address space. This
	 * memory key refers to the direct keys that contain the MTT
	 * translations.
	 */
	err = create_indirect_key(mvdev, mr);
	if (err)
		goto err_chain;

	mr->initialized = true;
	return 0;

err_chain:
	list_for_each_entry_safe_reverse(dmr, n, &mr->head, list) {
		list_del_init(&dmr->list);
		unmap_direct_mr(mvdev, dmr);
		kfree(dmr);
	}
	return err;
}

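/* Takes mkey_mtx to serialize mkey creation against mlx5_vdpa_destroy_mr()
 * and mlx5_vdpa_handle_set_map().
 */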
int mlx5_vdpa_create_mr(struct mlx5_vdpa_dev *mvdev, struct vhost_iotlb *iotlb)
{
	struct mlx5_vdpa_mr *mr = &mvdev->mr;
	int err;

	mutex_lock(&mr->mkey_mtx);
	err = _mlx5_vdpa_create_mr(mvdev, iotlb);
	mutex_unlock(&mr->mkey_mtx);
	return err;
}

void mlx5_vdpa_destroy_mr(struct mlx5_vdpa_dev *mvdev)
{
	struct mlx5_vdpa_mr *mr = &mvdev->mr;
	struct mlx5_vdpa_direct_mr *dmr;
	struct mlx5_vdpa_direct_mr *n;

	mutex_lock(&mr->mkey_mtx);
	if (!mr->initialized)
		goto out;

	destroy_indirect_key(mvdev, mr);
	list_for_each_entry_safe_reverse(dmr, n, &mr->head, list) {
		list_del_init(&dmr->list);
		unmap_direct_mr(mvdev, dmr);
		kfree(dmr);
	}
	/* Reset the bookkeeping fields individually; a memset() of the whole
	 * struct would also clobber mkey_mtx, which is still held.
	 */
	mr->num_directs = 0;
	mr->num_klms = 0;
	mr->initialized = false;
out:
	mutex_unlock(&mr->mkey_mtx);
}

static bool map_empty(struct vhost_iotlb *iotlb)
{
	return !vhost_iotlb_itree_first(iotlb, 0, U64_MAX);
}

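/* Set *change_map when a mapping already exists; the caller is then expected
 * to tear down the old memory key and call back with the new iotlb. An empty
 * iotlb simply destroys the current mapping.
 */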
int mlx5_vdpa_handle_set_map(struct mlx5_vdpa_dev *mvdev, struct vhost_iotlb *iotlb,
			     bool *change_map)
{
	struct mlx5_vdpa_mr *mr = &mvdev->mr;
	int err = 0;

	*change_map = false;
	if (map_empty(iotlb)) {
		mlx5_vdpa_destroy_mr(mvdev);
		return 0;
	}
	mutex_lock(&mr->mkey_mtx);
	if (mr->initialized) {
		mlx5_vdpa_info(mvdev, "memory map update\n");
		*change_map = true;
	}
	if (!*change_map)
		err = _mlx5_vdpa_create_mr(mvdev, iotlb);
	mutex_unlock(&mr->mkey_mtx);

	return err;
}