// SPDX-License-Identifier: GPL-2.0 OR MIT
/**************************************************************************
 *
 * Copyright 2009-2015 VMware, Inc., Palo Alto, CA., USA
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include "vmwgfx_drv.h"
#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_placement.h>
#include <drm/ttm/ttm_page_alloc.h>

static const struct ttm_place vram_placement_flags = {
	.fpfn = 0,
	.lpfn = 0,
	.flags = TTM_PL_FLAG_VRAM | TTM_PL_FLAG_CACHED
};

static const struct ttm_place vram_ne_placement_flags = {
	.fpfn = 0,
	.lpfn = 0,
	.flags = TTM_PL_FLAG_VRAM | TTM_PL_FLAG_CACHED | TTM_PL_FLAG_NO_EVICT
};

static const struct ttm_place sys_placement_flags = {
	.fpfn = 0,
	.lpfn = 0,
	.flags = TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED
};

static const struct ttm_place sys_ne_placement_flags = {
	.fpfn = 0,
	.lpfn = 0,
	.flags = TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED | TTM_PL_FLAG_NO_EVICT
};

static const struct ttm_place gmr_placement_flags = {
	.fpfn = 0,
	.lpfn = 0,
	.flags = VMW_PL_FLAG_GMR | TTM_PL_FLAG_CACHED
};

static const struct ttm_place gmr_ne_placement_flags = {
	.fpfn = 0,
	.lpfn = 0,
	.flags = VMW_PL_FLAG_GMR | TTM_PL_FLAG_CACHED | TTM_PL_FLAG_NO_EVICT
};

static const struct ttm_place mob_placement_flags = {
	.fpfn = 0,
	.lpfn = 0,
	.flags = VMW_PL_FLAG_MOB | TTM_PL_FLAG_CACHED
};

static const struct ttm_place mob_ne_placement_flags = {
	.fpfn = 0,
	.lpfn = 0,
	.flags = VMW_PL_FLAG_MOB | TTM_PL_FLAG_CACHED | TTM_PL_FLAG_NO_EVICT
};

struct ttm_placement vmw_vram_placement = {
	.num_placement = 1,
	.placement = &vram_placement_flags,
	.num_busy_placement = 1,
	.busy_placement = &vram_placement_flags
};

static const struct ttm_place vram_gmr_placement_flags[] = {
	{
		.fpfn = 0,
		.lpfn = 0,
		.flags = TTM_PL_FLAG_VRAM | TTM_PL_FLAG_CACHED
	}, {
		.fpfn = 0,
		.lpfn = 0,
		.flags = VMW_PL_FLAG_GMR | TTM_PL_FLAG_CACHED
	}
};

static const struct ttm_place gmr_vram_placement_flags[] = {
	{
		.fpfn = 0,
		.lpfn = 0,
		.flags = VMW_PL_FLAG_GMR | TTM_PL_FLAG_CACHED
	}, {
		.fpfn = 0,
		.lpfn = 0,
		.flags = TTM_PL_FLAG_VRAM | TTM_PL_FLAG_CACHED
	}
};

struct ttm_placement vmw_vram_gmr_placement = {
	.num_placement = 2,
	.placement = vram_gmr_placement_flags,
	.num_busy_placement = 1,
	.busy_placement = &gmr_placement_flags
};

static const struct ttm_place vram_gmr_ne_placement_flags[] = {
	{
		.fpfn = 0,
		.lpfn = 0,
		.flags = TTM_PL_FLAG_VRAM | TTM_PL_FLAG_CACHED |
			 TTM_PL_FLAG_NO_EVICT
	}, {
		.fpfn = 0,
		.lpfn = 0,
		.flags = VMW_PL_FLAG_GMR | TTM_PL_FLAG_CACHED |
			 TTM_PL_FLAG_NO_EVICT
	}
};

struct ttm_placement vmw_vram_gmr_ne_placement = {
	.num_placement = 2,
	.placement = vram_gmr_ne_placement_flags,
	.num_busy_placement = 1,
	.busy_placement = &gmr_ne_placement_flags
};

struct ttm_placement vmw_vram_sys_placement = {
	.num_placement = 1,
	.placement = &vram_placement_flags,
	.num_busy_placement = 1,
	.busy_placement = &sys_placement_flags
};

struct ttm_placement vmw_vram_ne_placement = {
	.num_placement = 1,
	.placement = &vram_ne_placement_flags,
	.num_busy_placement = 1,
	.busy_placement = &vram_ne_placement_flags
};

struct ttm_placement vmw_sys_placement = {
	.num_placement = 1,
	.placement = &sys_placement_flags,
	.num_busy_placement = 1,
	.busy_placement = &sys_placement_flags
};

struct ttm_placement vmw_sys_ne_placement = {
	.num_placement = 1,
	.placement = &sys_ne_placement_flags,
	.num_busy_placement = 1,
	.busy_placement = &sys_ne_placement_flags
};

static const struct ttm_place evictable_placement_flags[] = {
	{
		.fpfn = 0,
		.lpfn = 0,
		.flags = TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED
	}, {
		.fpfn = 0,
		.lpfn = 0,
		.flags = TTM_PL_FLAG_VRAM | TTM_PL_FLAG_CACHED
	}, {
		.fpfn = 0,
		.lpfn = 0,
		.flags = VMW_PL_FLAG_GMR | TTM_PL_FLAG_CACHED
	}, {
		.fpfn = 0,
		.lpfn = 0,
		.flags = VMW_PL_FLAG_MOB | TTM_PL_FLAG_CACHED
	}
};

static const struct ttm_place nonfixed_placement_flags[] = {
	{
		.fpfn = 0,
		.lpfn = 0,
		.flags = TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED
	}, {
		.fpfn = 0,
		.lpfn = 0,
		.flags = VMW_PL_FLAG_GMR | TTM_PL_FLAG_CACHED
	}, {
		.fpfn = 0,
		.lpfn = 0,
		.flags = VMW_PL_FLAG_MOB | TTM_PL_FLAG_CACHED
	}
};

struct ttm_placement vmw_evictable_placement = {
	.num_placement = 4,
	.placement = evictable_placement_flags,
	.num_busy_placement = 1,
	.busy_placement = &sys_placement_flags
};

struct ttm_placement vmw_srf_placement = {
	.num_placement = 1,
	.num_busy_placement = 2,
	.placement = &gmr_placement_flags,
	.busy_placement = gmr_vram_placement_flags
};

struct ttm_placement vmw_mob_placement = {
	.num_placement = 1,
	.num_busy_placement = 1,
	.placement = &mob_placement_flags,
	.busy_placement = &mob_placement_flags
};

struct ttm_placement vmw_mob_ne_placement = {
	.num_placement = 1,
	.num_busy_placement = 1,
	.placement = &mob_ne_placement_flags,
	.busy_placement = &mob_ne_placement_flags
};

struct ttm_placement vmw_nonfixed_placement = {
	.num_placement = 3,
	.placement = nonfixed_placement_flags,
	.num_busy_placement = 1,
	.busy_placement = &sys_placement_flags
};

struct vmw_ttm_tt {
	struct ttm_dma_tt dma_ttm;	/* Base TTM object, DMA-aware variant */
	struct vmw_private *dev_priv;	/* Device private structure */
	int gmr_id;			/* GMR or MOB id assigned at bind time */
	struct vmw_mob *mob;		/* Backing MOB, created on first MOB bind */
	int mem_type;			/* Memory type this TT is bound to */
	struct sg_table sgt;		/* Scatter-gather table for the map modes */
	struct vmw_sg_table vsgt;	/* Mode-independent view of the pages */
	uint64_t sg_alloc_size;		/* Size accounted for the sg table */
	bool mapped;			/* Whether DMA mappings have been set up */
};

const size_t vmw_tt_size = sizeof(struct vmw_ttm_tt);

/**
 * Helper functions to advance a struct vmw_piter iterator.
 *
 * @viter: Pointer to the iterator.
 *
 * These functions return false if past the end of the list,
 * true otherwise. Functions are selected depending on the current
 * DMA mapping mode.
 */
static bool __vmw_piter_non_sg_next(struct vmw_piter *viter)
{
	return ++(viter->i) < viter->num_pages;
}

static bool __vmw_piter_sg_next(struct vmw_piter *viter)
{
	bool ret = __vmw_piter_non_sg_next(viter);

	return __sg_page_iter_dma_next(&viter->iter) && ret;
}


/**
 * Helper functions to return a pointer to the current page.
 *
 * @viter: Pointer to the iterator
 *
 * These functions return a pointer to the page currently
 * pointed to by @viter. Functions are selected depending on the
 * current mapping mode.
 */
static struct page *__vmw_piter_non_sg_page(struct vmw_piter *viter)
{
	return viter->pages[viter->i];
}

/**
 * Helper functions to return the DMA address of the current page.
 *
 * @viter: Pointer to the iterator
 *
 * These functions return the DMA address of the page currently
 * pointed to by @viter. Functions are selected depending on the
 * current mapping mode.
 */
static dma_addr_t __vmw_piter_phys_addr(struct vmw_piter *viter)
{
	return page_to_phys(viter->pages[viter->i]);
}

static dma_addr_t __vmw_piter_dma_addr(struct vmw_piter *viter)
{
	return viter->addrs[viter->i];
}

static dma_addr_t __vmw_piter_sg_addr(struct vmw_piter *viter)
{
	return sg_page_iter_dma_address(&viter->iter);
}


/**
 * vmw_piter_start - Initialize a struct vmw_piter.
 *
 * @viter: Pointer to the iterator to initialize
 * @vsgt: Pointer to a struct vmw_sg_table to initialize from
 * @p_offset: Page offset into @vsgt at which to start iterating
 *
 * Note that we're following the convention of __sg_page_iter_start, so that
 * the iterator doesn't point to a valid page after initialization; it has
 * to be advanced one step first.
 */
void vmw_piter_start(struct vmw_piter *viter, const struct vmw_sg_table *vsgt,
		     unsigned long p_offset)
{
	viter->i = p_offset - 1;
	viter->num_pages = vsgt->num_pages;
	viter->page = &__vmw_piter_non_sg_page;
	viter->pages = vsgt->pages;
	switch (vsgt->mode) {
	case vmw_dma_phys:
		viter->next = &__vmw_piter_non_sg_next;
		viter->dma_address = &__vmw_piter_phys_addr;
		break;
	case vmw_dma_alloc_coherent:
		viter->next = &__vmw_piter_non_sg_next;
		viter->dma_address = &__vmw_piter_dma_addr;
		viter->addrs = vsgt->addrs;
		break;
	case vmw_dma_map_populate:
	case vmw_dma_map_bind:
		viter->next = &__vmw_piter_sg_next;
		viter->dma_address = &__vmw_piter_sg_addr;
		__sg_page_iter_start(&viter->iter.base, vsgt->sgt->sgl,
				     vsgt->sgt->orig_nents, p_offset);
		break;
	default:
		BUG();
	}
}
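
/*
 * Example usage (editor's illustrative sketch, mirroring the region-counting
 * loop in vmw_ttm_map_dma() below). Because of the __sg_page_iter_start
 * convention, the iterator must be advanced once before the first page is
 * valid:
 *
 *	struct vmw_piter iter;
 *
 *	for (vmw_piter_start(&iter, vsgt, 0); vmw_piter_next(&iter);) {
 *		dma_addr_t addr = vmw_piter_dma_addr(&iter);
 *		(consume the DMA address of one page)
 *	}
 */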

/**
 * vmw_ttm_unmap_from_dma - unmap device addresses previously mapped for
 * TTM pages
 *
 * @vmw_tt: Pointer to a struct vmw_ttm_tt
 *
 * Used to free dma mappings previously mapped by vmw_ttm_map_for_dma.
 */
static void vmw_ttm_unmap_from_dma(struct vmw_ttm_tt *vmw_tt)
{
	struct device *dev = vmw_tt->dev_priv->dev->dev;

	dma_unmap_sg(dev, vmw_tt->sgt.sgl, vmw_tt->sgt.nents,
		DMA_BIDIRECTIONAL);
	vmw_tt->sgt.nents = vmw_tt->sgt.orig_nents;
}

/**
 * vmw_ttm_map_for_dma - map TTM pages to get device addresses
 *
 * @vmw_tt: Pointer to a struct vmw_ttm_tt
 *
 * This function is used to get device addresses from the kernel DMA layer.
 * However, it's violating the DMA API in that when this operation has been
 * performed, it's illegal for the CPU to write to the pages without first
 * unmapping the DMA mappings, or calling dma_sync_sg_for_cpu(). It is
 * therefore only legal to call this function if we know that the function
 * dma_sync_sg_for_cpu() is a NOP, and dma_sync_sg_for_device() is at most
 * a CPU write buffer flush.
 */
static int vmw_ttm_map_for_dma(struct vmw_ttm_tt *vmw_tt)
{
	struct device *dev = vmw_tt->dev_priv->dev->dev;
	int ret;

	ret = dma_map_sg(dev, vmw_tt->sgt.sgl, vmw_tt->sgt.orig_nents,
			 DMA_BIDIRECTIONAL);
	if (unlikely(ret == 0))
		return -ENOMEM;

	vmw_tt->sgt.nents = ret;

	return 0;
}

/**
 * vmw_ttm_map_dma - Make sure TTM pages are visible to the device
 *
 * @vmw_tt: Pointer to a struct vmw_ttm_tt
 *
 * Select the correct mapping functions and make sure the TTM pages are
 * visible to the device. Allocate storage for the device mappings.
 * If a mapping has already been performed, indicated by the storage
 * pointer being non-NULL, the function returns success.
 */
static int vmw_ttm_map_dma(struct vmw_ttm_tt *vmw_tt)
{
	struct vmw_private *dev_priv = vmw_tt->dev_priv;
	struct ttm_mem_global *glob = vmw_mem_glob(dev_priv);
	struct vmw_sg_table *vsgt = &vmw_tt->vsgt;
	struct ttm_operation_ctx ctx = {
		.interruptible = true,
		.no_wait_gpu = false
	};
	struct vmw_piter iter;
	dma_addr_t old;
	int ret = 0;
	static size_t sgl_size;
	static size_t sgt_size;

	if (vmw_tt->mapped)
		return 0;

	vsgt->mode = dev_priv->map_mode;
	vsgt->pages = vmw_tt->dma_ttm.ttm.pages;
	vsgt->num_pages = vmw_tt->dma_ttm.ttm.num_pages;
	vsgt->addrs = vmw_tt->dma_ttm.dma_address;
	vsgt->sgt = &vmw_tt->sgt;

	switch (dev_priv->map_mode) {
	case vmw_dma_map_bind:
	case vmw_dma_map_populate:
		if (unlikely(!sgl_size)) {
			sgl_size = ttm_round_pot(sizeof(struct scatterlist));
			sgt_size = ttm_round_pot(sizeof(struct sg_table));
		}
		vmw_tt->sg_alloc_size = sgt_size + sgl_size * vsgt->num_pages;
		ret = ttm_mem_global_alloc(glob, vmw_tt->sg_alloc_size, &ctx);
		if (unlikely(ret != 0))
			return ret;

		ret = __sg_alloc_table_from_pages
			(&vmw_tt->sgt, vsgt->pages, vsgt->num_pages, 0,
			 (unsigned long) vsgt->num_pages << PAGE_SHIFT,
			 dma_get_max_seg_size(dev_priv->dev->dev),
			 GFP_KERNEL);
		if (unlikely(ret != 0))
			goto out_sg_alloc_fail;

		if (vsgt->num_pages > vmw_tt->sgt.nents) {
			uint64_t over_alloc =
				sgl_size * (vsgt->num_pages -
					    vmw_tt->sgt.nents);

			ttm_mem_global_free(glob, over_alloc);
			vmw_tt->sg_alloc_size -= over_alloc;
		}

		ret = vmw_ttm_map_for_dma(vmw_tt);
		if (unlikely(ret != 0))
			goto out_map_fail;

		break;
	default:
		break;
	}

	old = ~((dma_addr_t) 0);
	vmw_tt->vsgt.num_regions = 0;
	for (vmw_piter_start(&iter, vsgt, 0); vmw_piter_next(&iter);) {
		dma_addr_t cur = vmw_piter_dma_addr(&iter);

		if (cur != old + PAGE_SIZE)
			vmw_tt->vsgt.num_regions++;
		old = cur;
	}

	vmw_tt->mapped = true;
	return 0;

out_map_fail:
	sg_free_table(vmw_tt->vsgt.sgt);
	vmw_tt->vsgt.sgt = NULL;
out_sg_alloc_fail:
	ttm_mem_global_free(glob, vmw_tt->sg_alloc_size);
	return ret;
}
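
/*
 * Worked example (editor's note): the region count above merges pages whose
 * DMA addresses are contiguous. With a 4 KiB PAGE_SIZE and page addresses
 * 0x10000, 0x11000, 0x12000, 0x40000 and 0x41000, the first three pages
 * coalesce into one region and the last two into another, so
 * vsgt.num_regions ends up as 2.
 */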

/**
 * vmw_ttm_unmap_dma - Tear down any TTM page device mappings
 *
 * @vmw_tt: Pointer to a struct vmw_ttm_tt
 *
 * Tear down any previously set up device DMA mappings and free
 * any storage space allocated for them. If there are no mappings set up,
 * this function is a NOP.
 */
static void vmw_ttm_unmap_dma(struct vmw_ttm_tt *vmw_tt)
{
	struct vmw_private *dev_priv = vmw_tt->dev_priv;

	if (!vmw_tt->vsgt.sgt)
		return;

	switch (dev_priv->map_mode) {
	case vmw_dma_map_bind:
	case vmw_dma_map_populate:
		vmw_ttm_unmap_from_dma(vmw_tt);
		sg_free_table(vmw_tt->vsgt.sgt);
		vmw_tt->vsgt.sgt = NULL;
		ttm_mem_global_free(vmw_mem_glob(dev_priv),
				    vmw_tt->sg_alloc_size);
		break;
	default:
		break;
	}
	vmw_tt->mapped = false;
}


/**
 * vmw_bo_map_dma - Make sure buffer object pages are visible to the device
 *
 * @bo: Pointer to a struct ttm_buffer_object
 *
 * Wrapper around vmw_ttm_map_dma, that takes a TTM buffer object pointer
 * instead of a pointer to a struct vmw_ttm_tt as argument.
 * Note that the buffer object must be either pinned or reserved before
 * calling this function.
 */
int vmw_bo_map_dma(struct ttm_buffer_object *bo)
{
	struct vmw_ttm_tt *vmw_tt =
		container_of(bo->ttm, struct vmw_ttm_tt, dma_ttm.ttm);

	return vmw_ttm_map_dma(vmw_tt);
}


/**
 * vmw_bo_unmap_dma - Tear down the device DMA mappings of buffer object pages
 *
 * @bo: Pointer to a struct ttm_buffer_object
 *
 * Wrapper around vmw_ttm_unmap_dma, that takes a TTM buffer object pointer
 * instead of a pointer to a struct vmw_ttm_tt as argument.
 */
void vmw_bo_unmap_dma(struct ttm_buffer_object *bo)
{
	struct vmw_ttm_tt *vmw_tt =
		container_of(bo->ttm, struct vmw_ttm_tt, dma_ttm.ttm);

	vmw_ttm_unmap_dma(vmw_tt);
}


/**
 * vmw_bo_sg_table - Return a struct vmw_sg_table object for a
 * TTM buffer object
 *
 * @bo: Pointer to a struct ttm_buffer_object
 *
 * Returns a pointer to a struct vmw_sg_table object. The object must
 * not be freed by the caller.
 * Note that for the device addresses to be valid, the buffer object must
 * either be reserved or pinned.
 */
const struct vmw_sg_table *vmw_bo_sg_table(struct ttm_buffer_object *bo)
{
	struct vmw_ttm_tt *vmw_tt =
		container_of(bo->ttm, struct vmw_ttm_tt, dma_ttm.ttm);

	return &vmw_tt->vsgt;
}
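
/*
 * Example usage (editor's illustrative sketch; error handling trimmed, and
 * ttm_bo_reserve()/ttm_bo_unreserve() assumed to be the caller's chosen way
 * of satisfying the reservation requirement):
 *
 *	int ret = ttm_bo_reserve(bo, true, false, NULL);
 *
 *	if (unlikely(ret != 0))
 *		return ret;
 *	ret = vmw_bo_map_dma(bo);
 *	if (likely(ret == 0)) {
 *		const struct vmw_sg_table *vsgt = vmw_bo_sg_table(bo);
 *		(iterate vsgt with vmw_piter_start() / vmw_piter_next())
 *	}
 *	ttm_bo_unreserve(bo);
 */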


static int vmw_ttm_bind(struct ttm_tt *ttm, struct ttm_mem_reg *bo_mem)
{
	struct vmw_ttm_tt *vmw_be =
		container_of(ttm, struct vmw_ttm_tt, dma_ttm.ttm);
	int ret;

	ret = vmw_ttm_map_dma(vmw_be);
	if (unlikely(ret != 0))
		return ret;

	vmw_be->gmr_id = bo_mem->start;
	vmw_be->mem_type = bo_mem->mem_type;

	switch (bo_mem->mem_type) {
	case VMW_PL_GMR:
		return vmw_gmr_bind(vmw_be->dev_priv, &vmw_be->vsgt,
				    ttm->num_pages, vmw_be->gmr_id);
	case VMW_PL_MOB:
		if (unlikely(vmw_be->mob == NULL)) {
			vmw_be->mob =
				vmw_mob_create(ttm->num_pages);
			if (unlikely(vmw_be->mob == NULL))
				return -ENOMEM;
		}

		return vmw_mob_bind(vmw_be->dev_priv, vmw_be->mob,
				    &vmw_be->vsgt, ttm->num_pages,
				    vmw_be->gmr_id);
	default:
		BUG();
	}
	return 0;
}

static void vmw_ttm_unbind(struct ttm_tt *ttm)
{
	struct vmw_ttm_tt *vmw_be =
		container_of(ttm, struct vmw_ttm_tt, dma_ttm.ttm);

	switch (vmw_be->mem_type) {
	case VMW_PL_GMR:
		vmw_gmr_unbind(vmw_be->dev_priv, vmw_be->gmr_id);
		break;
	case VMW_PL_MOB:
		vmw_mob_unbind(vmw_be->dev_priv, vmw_be->mob);
		break;
	default:
		BUG();
	}

	if (vmw_be->dev_priv->map_mode == vmw_dma_map_bind)
		vmw_ttm_unmap_dma(vmw_be);
}


static void vmw_ttm_destroy(struct ttm_tt *ttm)
{
	struct vmw_ttm_tt *vmw_be =
		container_of(ttm, struct vmw_ttm_tt, dma_ttm.ttm);

	vmw_ttm_unmap_dma(vmw_be);
	if (vmw_be->dev_priv->map_mode == vmw_dma_alloc_coherent)
		ttm_dma_tt_fini(&vmw_be->dma_ttm);
	else
		ttm_tt_fini(ttm);

	if (vmw_be->mob)
		vmw_mob_destroy(vmw_be->mob);

	kfree(vmw_be);
}


static int vmw_ttm_populate(struct ttm_tt *ttm, struct ttm_operation_ctx *ctx)
{
	struct vmw_ttm_tt *vmw_tt =
		container_of(ttm, struct vmw_ttm_tt, dma_ttm.ttm);
	struct vmw_private *dev_priv = vmw_tt->dev_priv;
	struct ttm_mem_global *glob = vmw_mem_glob(dev_priv);
	int ret;

	if (ttm->state != tt_unpopulated)
		return 0;

	if (dev_priv->map_mode == vmw_dma_alloc_coherent) {
		size_t size =
			ttm_round_pot(ttm->num_pages * sizeof(dma_addr_t));
		ret = ttm_mem_global_alloc(glob, size, ctx);
		if (unlikely(ret != 0))
			return ret;

		ret = ttm_dma_populate(&vmw_tt->dma_ttm, dev_priv->dev->dev,
					ctx);
		if (unlikely(ret != 0))
			ttm_mem_global_free(glob, size);
	} else
		ret = ttm_pool_populate(ttm, ctx);

	return ret;
}

static void vmw_ttm_unpopulate(struct ttm_tt *ttm)
{
	struct vmw_ttm_tt *vmw_tt = container_of(ttm, struct vmw_ttm_tt,
						 dma_ttm.ttm);
	struct vmw_private *dev_priv = vmw_tt->dev_priv;
	struct ttm_mem_global *glob = vmw_mem_glob(dev_priv);


	if (vmw_tt->mob) {
		vmw_mob_destroy(vmw_tt->mob);
		vmw_tt->mob = NULL;
	}

	vmw_ttm_unmap_dma(vmw_tt);
	if (dev_priv->map_mode == vmw_dma_alloc_coherent) {
		size_t size =
			ttm_round_pot(ttm->num_pages * sizeof(dma_addr_t));

		ttm_dma_unpopulate(&vmw_tt->dma_ttm, dev_priv->dev->dev);
		ttm_mem_global_free(glob, size);
	} else
		ttm_pool_unpopulate(ttm);
}

static struct ttm_backend_func vmw_ttm_func = {
	.bind = vmw_ttm_bind,
	.unbind = vmw_ttm_unbind,
	.destroy = vmw_ttm_destroy,
};

static struct ttm_tt *vmw_ttm_tt_create(struct ttm_buffer_object *bo,
					uint32_t page_flags)
{
	struct vmw_ttm_tt *vmw_be;
	int ret;

	vmw_be = kzalloc(sizeof(*vmw_be), GFP_KERNEL);
	if (!vmw_be)
		return NULL;

	vmw_be->dma_ttm.ttm.func = &vmw_ttm_func;
	vmw_be->dev_priv = container_of(bo->bdev, struct vmw_private, bdev);
	vmw_be->mob = NULL;

	if (vmw_be->dev_priv->map_mode == vmw_dma_alloc_coherent)
		ret = ttm_dma_tt_init(&vmw_be->dma_ttm, bo, page_flags);
	else
		ret = ttm_tt_init(&vmw_be->dma_ttm.ttm, bo, page_flags);
	if (unlikely(ret != 0))
		goto out_no_init;

	return &vmw_be->dma_ttm.ttm;
out_no_init:
	kfree(vmw_be);
	return NULL;
}

static int vmw_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
		      struct ttm_mem_type_manager *man)
{
	switch (type) {
	case TTM_PL_SYSTEM:
		/* System memory */
		man->available_caching = TTM_PL_FLAG_CACHED;
		man->default_caching = TTM_PL_FLAG_CACHED;
		break;
	case TTM_PL_VRAM:
		/* "On-card" video ram */
		man->func = &vmw_thp_func;
		man->flags = TTM_MEMTYPE_FLAG_FIXED;
		man->available_caching = TTM_PL_FLAG_CACHED;
		man->default_caching = TTM_PL_FLAG_CACHED;
		break;
	case VMW_PL_GMR:
	case VMW_PL_MOB:
		/*
		 * "Guest Memory Regions" is an aperture-like feature with
		 *  one slot per bo. There is an upper limit on the number
		 *  of slots as well as on the bo size.
		 */
		man->func = &vmw_gmrid_manager_func;
		man->available_caching = TTM_PL_FLAG_CACHED;
		man->default_caching = TTM_PL_FLAG_CACHED;
		break;
	default:
		DRM_ERROR("Unsupported memory type %u\n", (unsigned)type);
		return -EINVAL;
	}
	return 0;
}

static void vmw_evict_flags(struct ttm_buffer_object *bo,
		     struct ttm_placement *placement)
{
	*placement = vmw_sys_placement;
}

static int vmw_verify_access(struct ttm_buffer_object *bo, struct file *filp)
{
	struct ttm_object_file *tfile =
		vmw_fpriv((struct drm_file *)filp->private_data)->tfile;

	return vmw_user_bo_verify_access(bo, tfile);
}

static int vmw_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
{
	struct vmw_private *dev_priv = container_of(bdev, struct vmw_private, bdev);

	mem->bus.addr = NULL;
	mem->bus.is_iomem = false;
	mem->bus.offset = 0;
	mem->bus.size = mem->num_pages << PAGE_SHIFT;
	mem->bus.base = 0;

	switch (mem->mem_type) {
	case TTM_PL_SYSTEM:
	case VMW_PL_GMR:
	case VMW_PL_MOB:
		return 0;
	case TTM_PL_VRAM:
		mem->bus.offset = mem->start << PAGE_SHIFT;
		mem->bus.base = dev_priv->vram_start;
		mem->bus.is_iomem = true;
		break;
	default:
		return -EINVAL;
	}
	return 0;
}
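
/*
 * Worked example (editor's note, with hypothetical numbers): with a 4 KiB
 * PAGE_SIZE, a VRAM buffer at page offset mem->start == 0x10 and a BAR at
 * dev_priv->vram_start == 0xe8000000 ends up CPU-addressable at
 * bus.base + bus.offset == 0xe8000000 + (0x10 << 12) == 0xe8010000.
 */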

/**
 * vmw_move_notify - TTM move_notify callback
 *
 * @bo: The TTM buffer object about to move.
 * @evict: Whether the move is an eviction.
 * @mem: The struct ttm_mem_reg indicating to what memory
 *       region the move is taking place.
 *
 * Calls move_notify for all subsystems needing it.
 * (currently only resources).
 */
static void vmw_move_notify(struct ttm_buffer_object *bo,
			    bool evict,
			    struct ttm_mem_reg *mem)
{
	vmw_bo_move_notify(bo, mem);
	vmw_query_move_notify(bo, mem);
}


/**
 * vmw_swap_notify - TTM swap_notify callback
 *
 * @bo: The TTM buffer object about to be swapped out.
 */
static void vmw_swap_notify(struct ttm_buffer_object *bo)
{
	vmw_bo_swap_notify(bo);
	(void) ttm_bo_wait(bo, false, false);
}


struct ttm_bo_driver vmw_bo_driver = {
	.ttm_tt_create = &vmw_ttm_tt_create,
	.ttm_tt_populate = &vmw_ttm_populate,
	.ttm_tt_unpopulate = &vmw_ttm_unpopulate,
	.init_mem_type = vmw_init_mem_type,
	.eviction_valuable = ttm_bo_eviction_valuable,
	.evict_flags = vmw_evict_flags,
	.move = NULL,
	.verify_access = vmw_verify_access,
	.move_notify = vmw_move_notify,
	.swap_notify = vmw_swap_notify,
	.io_mem_reserve = &vmw_ttm_io_mem_reserve,
};