v4.17
  1/**************************************************************************
  2 *
  3 * Copyright © 2009-2015 VMware, Inc., Palo Alto, CA., USA
  4 * All Rights Reserved.
  5 *
  6 * Permission is hereby granted, free of charge, to any person obtaining a
  7 * copy of this software and associated documentation files (the
  8 * "Software"), to deal in the Software without restriction, including
  9 * without limitation the rights to use, copy, modify, merge, publish,
 10 * distribute, sub license, and/or sell copies of the Software, and to
 11 * permit persons to whom the Software is furnished to do so, subject to
 12 * the following conditions:
 13 *
 14 * The above copyright notice and this permission notice (including the
 15 * next paragraph) shall be included in all copies or substantial portions
 16 * of the Software.
 17 *
 18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 19 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 20 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 21 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 22 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 23 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 24 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 25 *
 26 **************************************************************************/
 27
 28#include "vmwgfx_drv.h"
 29#include <drm/ttm/ttm_bo_driver.h>
 30#include <drm/ttm/ttm_placement.h>
 31#include <drm/ttm/ttm_page_alloc.h>
 32
 33static const struct ttm_place vram_placement_flags = {
 34	.fpfn = 0,
 35	.lpfn = 0,
 36	.flags = TTM_PL_FLAG_VRAM | TTM_PL_FLAG_CACHED
 37};
 38
 39static const struct ttm_place vram_ne_placement_flags = {
 40	.fpfn = 0,
 41	.lpfn = 0,
 42	.flags = TTM_PL_FLAG_VRAM | TTM_PL_FLAG_CACHED | TTM_PL_FLAG_NO_EVICT
 43};
 44
 45static const struct ttm_place sys_placement_flags = {
 46	.fpfn = 0,
 47	.lpfn = 0,
 48	.flags = TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED
 49};
 50
 51static const struct ttm_place sys_ne_placement_flags = {
 52	.fpfn = 0,
 53	.lpfn = 0,
 54	.flags = TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED | TTM_PL_FLAG_NO_EVICT
 55};
 56
 57static const struct ttm_place gmr_placement_flags = {
 58	.fpfn = 0,
 59	.lpfn = 0,
 60	.flags = VMW_PL_FLAG_GMR | TTM_PL_FLAG_CACHED
 61};
 62
 63static const struct ttm_place gmr_ne_placement_flags = {
 64	.fpfn = 0,
 65	.lpfn = 0,
 66	.flags = VMW_PL_FLAG_GMR | TTM_PL_FLAG_CACHED | TTM_PL_FLAG_NO_EVICT
 67};
 68
 69static const struct ttm_place mob_placement_flags = {
 70	.fpfn = 0,
 71	.lpfn = 0,
 72	.flags = VMW_PL_FLAG_MOB | TTM_PL_FLAG_CACHED
 73};
 74
 75static const struct ttm_place mob_ne_placement_flags = {
 76	.fpfn = 0,
 77	.lpfn = 0,
 78	.flags = VMW_PL_FLAG_MOB | TTM_PL_FLAG_CACHED | TTM_PL_FLAG_NO_EVICT
 79};
 80
 81struct ttm_placement vmw_vram_placement = {
 82	.num_placement = 1,
 83	.placement = &vram_placement_flags,
 84	.num_busy_placement = 1,
 85	.busy_placement = &vram_placement_flags
 86};
 87
 88static const struct ttm_place vram_gmr_placement_flags[] = {
 89	{
 90		.fpfn = 0,
 91		.lpfn = 0,
 92		.flags = TTM_PL_FLAG_VRAM | TTM_PL_FLAG_CACHED
 93	}, {
 94		.fpfn = 0,
 95		.lpfn = 0,
 96		.flags = VMW_PL_FLAG_GMR | TTM_PL_FLAG_CACHED
 97	}
 98};
 99
100static const struct ttm_place gmr_vram_placement_flags[] = {
101	{
102		.fpfn = 0,
103		.lpfn = 0,
104		.flags = VMW_PL_FLAG_GMR | TTM_PL_FLAG_CACHED
105	}, {
106		.fpfn = 0,
107		.lpfn = 0,
108		.flags = TTM_PL_FLAG_VRAM | TTM_PL_FLAG_CACHED
109	}
110};
111
112struct ttm_placement vmw_vram_gmr_placement = {
113	.num_placement = 2,
114	.placement = vram_gmr_placement_flags,
115	.num_busy_placement = 1,
116	.busy_placement = &gmr_placement_flags
117};
118
119static const struct ttm_place vram_gmr_ne_placement_flags[] = {
120	{
121		.fpfn = 0,
122		.lpfn = 0,
123		.flags = TTM_PL_FLAG_VRAM | TTM_PL_FLAG_CACHED |
124			 TTM_PL_FLAG_NO_EVICT
125	}, {
126		.fpfn = 0,
127		.lpfn = 0,
128		.flags = VMW_PL_FLAG_GMR | TTM_PL_FLAG_CACHED |
129			 TTM_PL_FLAG_NO_EVICT
130	}
131};
132
133struct ttm_placement vmw_vram_gmr_ne_placement = {
134	.num_placement = 2,
135	.placement = vram_gmr_ne_placement_flags,
136	.num_busy_placement = 1,
137	.busy_placement = &gmr_ne_placement_flags
138};
139
140struct ttm_placement vmw_vram_sys_placement = {
141	.num_placement = 1,
142	.placement = &vram_placement_flags,
143	.num_busy_placement = 1,
144	.busy_placement = &sys_placement_flags
145};
146
147struct ttm_placement vmw_vram_ne_placement = {
148	.num_placement = 1,
149	.placement = &vram_ne_placement_flags,
150	.num_busy_placement = 1,
151	.busy_placement = &vram_ne_placement_flags
152};
153
154struct ttm_placement vmw_sys_placement = {
155	.num_placement = 1,
156	.placement = &sys_placement_flags,
157	.num_busy_placement = 1,
158	.busy_placement = &sys_placement_flags
159};
160
161struct ttm_placement vmw_sys_ne_placement = {
162	.num_placement = 1,
163	.placement = &sys_ne_placement_flags,
164	.num_busy_placement = 1,
165	.busy_placement = &sys_ne_placement_flags
166};
167
168static const struct ttm_place evictable_placement_flags[] = {
169	{
170		.fpfn = 0,
171		.lpfn = 0,
172		.flags = TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED
173	}, {
174		.fpfn = 0,
175		.lpfn = 0,
176		.flags = TTM_PL_FLAG_VRAM | TTM_PL_FLAG_CACHED
177	}, {
178		.fpfn = 0,
179		.lpfn = 0,
180		.flags = VMW_PL_FLAG_GMR | TTM_PL_FLAG_CACHED
181	}, {
182		.fpfn = 0,
183		.lpfn = 0,
184		.flags = VMW_PL_FLAG_MOB | TTM_PL_FLAG_CACHED
185	}
186};
187
188static const struct ttm_place nonfixed_placement_flags[] = {
189	{
190		.fpfn = 0,
191		.lpfn = 0,
192		.flags = TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED
193	}, {
194		.fpfn = 0,
195		.lpfn = 0,
196		.flags = VMW_PL_FLAG_GMR | TTM_PL_FLAG_CACHED
197	}, {
198		.fpfn = 0,
199		.lpfn = 0,
200		.flags = VMW_PL_FLAG_MOB | TTM_PL_FLAG_CACHED
201	}
202};
203
204struct ttm_placement vmw_evictable_placement = {
205	.num_placement = 4,
206	.placement = evictable_placement_flags,
207	.num_busy_placement = 1,
208	.busy_placement = &sys_placement_flags
209};
210
211struct ttm_placement vmw_srf_placement = {
212	.num_placement = 1,
213	.num_busy_placement = 2,
214	.placement = &gmr_placement_flags,
215	.busy_placement = gmr_vram_placement_flags
216};
217
218struct ttm_placement vmw_mob_placement = {
219	.num_placement = 1,
220	.num_busy_placement = 1,
221	.placement = &mob_placement_flags,
222	.busy_placement = &mob_placement_flags
223};
224
225struct ttm_placement vmw_mob_ne_placement = {
226	.num_placement = 1,
227	.num_busy_placement = 1,
228	.placement = &mob_ne_placement_flags,
229	.busy_placement = &mob_ne_placement_flags
230};
231
232struct ttm_placement vmw_nonfixed_placement = {
233	.num_placement = 3,
234	.placement = nonfixed_placement_flags,
235	.num_busy_placement = 1,
236	.busy_placement = &sys_placement_flags
237};
238
239struct vmw_ttm_tt {
240	struct ttm_dma_tt dma_ttm;
241	struct vmw_private *dev_priv;
242	int gmr_id;
243	struct vmw_mob *mob;
244	int mem_type;
245	struct sg_table sgt;
246	struct vmw_sg_table vsgt;
247	uint64_t sg_alloc_size;
248	bool mapped;
249};
250
251const size_t vmw_tt_size = sizeof(struct vmw_ttm_tt);
252
253/**
254 * Helper functions to advance a struct vmw_piter iterator.
255 *
256 * @viter: Pointer to the iterator.
257 *
258 * These functions return false if past the end of the list,
259 * true otherwise. Functions are selected depending on the current
260 * DMA mapping mode.
261 */
262static bool __vmw_piter_non_sg_next(struct vmw_piter *viter)
263{
264	return ++(viter->i) < viter->num_pages;
265}
266
267static bool __vmw_piter_sg_next(struct vmw_piter *viter)
268{
269	return __sg_page_iter_next(&viter->iter);
270}
271
272
273/**
274 * Helper functions to return a pointer to the current page.
275 *
276 * @viter: Pointer to the iterator
277 *
278 * These functions return a pointer to the page currently
279 * pointed to by @viter. Functions are selected depending on the
280 * current mapping mode.
281 */
282static struct page *__vmw_piter_non_sg_page(struct vmw_piter *viter)
283{
284	return viter->pages[viter->i];
285}
286
287static struct page *__vmw_piter_sg_page(struct vmw_piter *viter)
288{
289	return sg_page_iter_page(&viter->iter);
290}
291
292
293/**
294 * Helper functions to return the DMA address of the current page.
295 *
296 * @viter: Pointer to the iterator
297 *
298 * These functions return the DMA address of the page currently
299 * pointed to by @viter. Functions are selected depending on the
300 * current mapping mode.
301 */
302static dma_addr_t __vmw_piter_phys_addr(struct vmw_piter *viter)
303{
304	return page_to_phys(viter->pages[viter->i]);
305}
306
307static dma_addr_t __vmw_piter_dma_addr(struct vmw_piter *viter)
308{
309	return viter->addrs[viter->i];
310}
311
312static dma_addr_t __vmw_piter_sg_addr(struct vmw_piter *viter)
313{
314	return sg_page_iter_dma_address(&viter->iter);
315}
316
317
318/**
319 * vmw_piter_start - Initialize a struct vmw_piter.
320 *
321 * @viter: Pointer to the iterator to initialize
322 * @vsgt: Pointer to a struct vmw_sg_table to initialize from
323 *
324 * Note that we're following the convention of __sg_page_iter_start, so that
325 * the iterator doesn't point to a valid page after initialization; it has
326 * to be advanced one step first.
327 */
328void vmw_piter_start(struct vmw_piter *viter, const struct vmw_sg_table *vsgt,
329		     unsigned long p_offset)
330{
331	viter->i = p_offset - 1;
332	viter->num_pages = vsgt->num_pages;
333	switch (vsgt->mode) {
334	case vmw_dma_phys:
335		viter->next = &__vmw_piter_non_sg_next;
336		viter->dma_address = &__vmw_piter_phys_addr;
337		viter->page = &__vmw_piter_non_sg_page;
338		viter->pages = vsgt->pages;
339		break;
340	case vmw_dma_alloc_coherent:
341		viter->next = &__vmw_piter_non_sg_next;
342		viter->dma_address = &__vmw_piter_dma_addr;
343		viter->page = &__vmw_piter_non_sg_page;
344		viter->addrs = vsgt->addrs;
345		viter->pages = vsgt->pages;
346		break;
347	case vmw_dma_map_populate:
348	case vmw_dma_map_bind:
349		viter->next = &__vmw_piter_sg_next;
350		viter->dma_address = &__vmw_piter_sg_addr;
351		viter->page = &__vmw_piter_sg_page;
352		__sg_page_iter_start(&viter->iter, vsgt->sgt->sgl,
353				     vsgt->sgt->orig_nents, p_offset);
354		break;
355	default:
356		BUG();
357	}
358}
359
360/**
361 * vmw_ttm_unmap_from_dma - unmap device addresses previously mapped for
362 * TTM pages
363 *
364 * @vmw_tt: Pointer to a struct vmw_ttm_backend
365 *
366 * Used to free dma mappings previously mapped by vmw_ttm_map_for_dma.
367 */
368static void vmw_ttm_unmap_from_dma(struct vmw_ttm_tt *vmw_tt)
369{
370	struct device *dev = vmw_tt->dev_priv->dev->dev;
371
372	dma_unmap_sg(dev, vmw_tt->sgt.sgl, vmw_tt->sgt.nents,
373		DMA_BIDIRECTIONAL);
374	vmw_tt->sgt.nents = vmw_tt->sgt.orig_nents;
375}
376
377/**
378 * vmw_ttm_map_for_dma - map TTM pages to get device addresses
379 *
380 * @vmw_tt: Pointer to a struct vmw_ttm_backend
381 *
382 * This function is used to get device addresses from the kernel DMA layer.
383 * However, it's violating the DMA API in that when this operation has been
384 * performed, it's illegal for the CPU to write to the pages without first
385 * unmapping the DMA mappings, or calling dma_sync_sg_for_cpu(). It is
386 * therefore only legal to call this function if we know that the function
387 * dma_sync_sg_for_cpu() is a NOP, and dma_sync_sg_for_device() is at most
388 * a CPU write buffer flush.
389 */
390static int vmw_ttm_map_for_dma(struct vmw_ttm_tt *vmw_tt)
391{
392	struct device *dev = vmw_tt->dev_priv->dev->dev;
393	int ret;
394
395	ret = dma_map_sg(dev, vmw_tt->sgt.sgl, vmw_tt->sgt.orig_nents,
396			 DMA_BIDIRECTIONAL);
397	if (unlikely(ret == 0))
398		return -ENOMEM;
399
400	vmw_tt->sgt.nents = ret;
401
402	return 0;
403}
404
405/**
406 * vmw_ttm_map_dma - Make sure TTM pages are visible to the device
407 *
408 * @vmw_tt: Pointer to a struct vmw_ttm_tt
409 *
410 * Select the correct mapping function and make sure the TTM pages are
411 * visible to the device. Allocate storage for the device mappings.
412 * If a mapping has already been performed, indicated by the storage
413 * pointer being non-NULL, the function returns success.
414 */
415static int vmw_ttm_map_dma(struct vmw_ttm_tt *vmw_tt)
416{
417	struct vmw_private *dev_priv = vmw_tt->dev_priv;
418	struct ttm_mem_global *glob = vmw_mem_glob(dev_priv);
419	struct vmw_sg_table *vsgt = &vmw_tt->vsgt;
420	struct ttm_operation_ctx ctx = {
421		.interruptible = true,
422		.no_wait_gpu = false
423	};
424	struct vmw_piter iter;
425	dma_addr_t old;
426	int ret = 0;
427	static size_t sgl_size;
428	static size_t sgt_size;
429
430	if (vmw_tt->mapped)
431		return 0;
432
433	vsgt->mode = dev_priv->map_mode;
434	vsgt->pages = vmw_tt->dma_ttm.ttm.pages;
435	vsgt->num_pages = vmw_tt->dma_ttm.ttm.num_pages;
436	vsgt->addrs = vmw_tt->dma_ttm.dma_address;
437	vsgt->sgt = &vmw_tt->sgt;
438
439	switch (dev_priv->map_mode) {
440	case vmw_dma_map_bind:
441	case vmw_dma_map_populate:
442		if (unlikely(!sgl_size)) {
443			sgl_size = ttm_round_pot(sizeof(struct scatterlist));
444			sgt_size = ttm_round_pot(sizeof(struct sg_table));
445		}
446		vmw_tt->sg_alloc_size = sgt_size + sgl_size * vsgt->num_pages;
447		ret = ttm_mem_global_alloc(glob, vmw_tt->sg_alloc_size, &ctx);
448		if (unlikely(ret != 0))
449			return ret;
450
451		ret = sg_alloc_table_from_pages(&vmw_tt->sgt, vsgt->pages,
452						vsgt->num_pages, 0,
453						(unsigned long)
454						vsgt->num_pages << PAGE_SHIFT,
455						GFP_KERNEL);
456		if (unlikely(ret != 0))
457			goto out_sg_alloc_fail;
458
459		if (vsgt->num_pages > vmw_tt->sgt.nents) {
460			uint64_t over_alloc =
461				sgl_size * (vsgt->num_pages -
462					    vmw_tt->sgt.nents);
463
464			ttm_mem_global_free(glob, over_alloc);
465			vmw_tt->sg_alloc_size -= over_alloc;
466		}
467
468		ret = vmw_ttm_map_for_dma(vmw_tt);
469		if (unlikely(ret != 0))
470			goto out_map_fail;
471
472		break;
473	default:
474		break;
475	}
476
477	old = ~((dma_addr_t) 0);
478	vmw_tt->vsgt.num_regions = 0;
479	for (vmw_piter_start(&iter, vsgt, 0); vmw_piter_next(&iter);) {
480		dma_addr_t cur = vmw_piter_dma_addr(&iter);
481
482		if (cur != old + PAGE_SIZE)
483			vmw_tt->vsgt.num_regions++;
484		old = cur;
485	}
486
487	vmw_tt->mapped = true;
488	return 0;
489
490out_map_fail:
491	sg_free_table(vmw_tt->vsgt.sgt);
492	vmw_tt->vsgt.sgt = NULL;
493out_sg_alloc_fail:
494	ttm_mem_global_free(glob, vmw_tt->sg_alloc_size);
495	return ret;
496}
497
498/**
499 * vmw_ttm_unmap_dma - Tear down any TTM page device mappings
500 *
501 * @vmw_tt: Pointer to a struct vmw_ttm_tt
502 *
503 * Tear down any previously set up device DMA mappings and free
504 * any storage space allocated for them. If there are no mappings set up,
505 * this function is a NOP.
506 */
507static void vmw_ttm_unmap_dma(struct vmw_ttm_tt *vmw_tt)
508{
509	struct vmw_private *dev_priv = vmw_tt->dev_priv;
510
511	if (!vmw_tt->vsgt.sgt)
512		return;
513
514	switch (dev_priv->map_mode) {
515	case vmw_dma_map_bind:
516	case vmw_dma_map_populate:
517		vmw_ttm_unmap_from_dma(vmw_tt);
518		sg_free_table(vmw_tt->vsgt.sgt);
519		vmw_tt->vsgt.sgt = NULL;
520		ttm_mem_global_free(vmw_mem_glob(dev_priv),
521				    vmw_tt->sg_alloc_size);
522		break;
523	default:
524		break;
525	}
526	vmw_tt->mapped = false;
527}
528
529
530/**
531 * vmw_bo_map_dma - Make sure buffer object pages are visible to the device
532 *
533 * @bo: Pointer to a struct ttm_buffer_object
534 *
535 * Wrapper around vmw_ttm_map_dma, that takes a TTM buffer object pointer
536 * instead of a pointer to a struct vmw_ttm_backend as argument.
537 * Note that the buffer object must be either pinned or reserved before
538 * calling this function.
539 */
540int vmw_bo_map_dma(struct ttm_buffer_object *bo)
541{
542	struct vmw_ttm_tt *vmw_tt =
543		container_of(bo->ttm, struct vmw_ttm_tt, dma_ttm.ttm);
544
545	return vmw_ttm_map_dma(vmw_tt);
546}
547
548
549/**
550 * vmw_bo_unmap_dma - Tear down device DMA mappings of buffer object pages
551 *
552 * @bo: Pointer to a struct ttm_buffer_object
553 *
554 * Wrapper around vmw_ttm_unmap_dma, that takes a TTM buffer object pointer
555 * instead of a pointer to a struct vmw_ttm_backend as argument.
556 */
557void vmw_bo_unmap_dma(struct ttm_buffer_object *bo)
558{
559	struct vmw_ttm_tt *vmw_tt =
560		container_of(bo->ttm, struct vmw_ttm_tt, dma_ttm.ttm);
561
562	vmw_ttm_unmap_dma(vmw_tt);
563}
564
565
566/**
567 * vmw_bo_sg_table - Return a struct vmw_sg_table object for a
568 * TTM buffer object
569 *
570 * @bo: Pointer to a struct ttm_buffer_object
571 *
572 * Returns a pointer to a struct vmw_sg_table object. The object should
573 * not be freed after use.
574 * Note that for the device addresses to be valid, the buffer object must
575 * either be reserved or pinned.
576 */
577const struct vmw_sg_table *vmw_bo_sg_table(struct ttm_buffer_object *bo)
578{
579	struct vmw_ttm_tt *vmw_tt =
580		container_of(bo->ttm, struct vmw_ttm_tt, dma_ttm.ttm);
581
582	return &vmw_tt->vsgt;
583}
584
585
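   /**
    * vmw_ttm_bind - Bind TTM pages to a GMR or MOB
    *
    * @ttm: Pointer to the struct ttm_tt being bound.
    * @bo_mem: The memory region to bind the pages to.
    *
    * Makes sure the pages are DMA mapped, then binds them to either a GMR
    * or a MOB id, depending on the memory type of @bo_mem.
    */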
586static int vmw_ttm_bind(struct ttm_tt *ttm, struct ttm_mem_reg *bo_mem)
587{
588	struct vmw_ttm_tt *vmw_be =
589		container_of(ttm, struct vmw_ttm_tt, dma_ttm.ttm);
590	int ret;
591
592	ret = vmw_ttm_map_dma(vmw_be);
593	if (unlikely(ret != 0))
594		return ret;
595
596	vmw_be->gmr_id = bo_mem->start;
597	vmw_be->mem_type = bo_mem->mem_type;
598
599	switch (bo_mem->mem_type) {
600	case VMW_PL_GMR:
601		return vmw_gmr_bind(vmw_be->dev_priv, &vmw_be->vsgt,
602				    ttm->num_pages, vmw_be->gmr_id);
603	case VMW_PL_MOB:
604		if (unlikely(vmw_be->mob == NULL)) {
605			vmw_be->mob =
606				vmw_mob_create(ttm->num_pages);
607			if (unlikely(vmw_be->mob == NULL))
608				return -ENOMEM;
609		}
610
611		return vmw_mob_bind(vmw_be->dev_priv, vmw_be->mob,
612				    &vmw_be->vsgt, ttm->num_pages,
613				    vmw_be->gmr_id);
614	default:
615		BUG();
616	}
617	return 0;
618}
619
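   /**
    * vmw_ttm_unbind - Unbind TTM pages from a GMR or MOB
    *
    * @ttm: Pointer to the struct ttm_tt being unbound.
    *
    * Unbinds the GMR or MOB the pages were bound to, and tears down the
    * DMA mappings when running in the bind-only mapping mode.
    */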
620static int vmw_ttm_unbind(struct ttm_tt *ttm)
621{
622	struct vmw_ttm_tt *vmw_be =
623		container_of(ttm, struct vmw_ttm_tt, dma_ttm.ttm);
624
625	switch (vmw_be->mem_type) {
626	case VMW_PL_GMR:
627		vmw_gmr_unbind(vmw_be->dev_priv, vmw_be->gmr_id);
628		break;
629	case VMW_PL_MOB:
630		vmw_mob_unbind(vmw_be->dev_priv, vmw_be->mob);
631		break;
632	default:
633		BUG();
634	}
635
636	if (vmw_be->dev_priv->map_mode == vmw_dma_map_bind)
637		vmw_ttm_unmap_dma(vmw_be);
638
639	return 0;
640}
641
642
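   /**
    * vmw_ttm_destroy - Free a struct vmw_ttm_tt
    *
    * @ttm: Pointer to the embedded struct ttm_tt.
    *
    * Tears down any remaining DMA mappings, finalizes the (DMA) TTM
    * structure, destroys an attached MOB and frees the backend.
    */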
643static void vmw_ttm_destroy(struct ttm_tt *ttm)
644{
645	struct vmw_ttm_tt *vmw_be =
646		container_of(ttm, struct vmw_ttm_tt, dma_ttm.ttm);
647
648	vmw_ttm_unmap_dma(vmw_be);
649	if (vmw_be->dev_priv->map_mode == vmw_dma_alloc_coherent)
650		ttm_dma_tt_fini(&vmw_be->dma_ttm);
651	else
652		ttm_tt_fini(ttm);
653
654	if (vmw_be->mob)
655		vmw_mob_destroy(vmw_be->mob);
656
657	kfree(vmw_be);
658}
659
660
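   /**
    * vmw_ttm_populate - Allocate backing pages for a struct ttm_tt
    *
    * @ttm: Pointer to the struct ttm_tt to populate.
    * @ctx: TTM operation context.
    *
    * Uses the coherent DMA pool when the driver runs in coherent mapping
    * mode, accounting the DMA address storage against the global TTM
    * memory accounting; otherwise uses the ordinary TTM page pool.
    */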
661static int vmw_ttm_populate(struct ttm_tt *ttm, struct ttm_operation_ctx *ctx)
662{
663	struct vmw_ttm_tt *vmw_tt =
664		container_of(ttm, struct vmw_ttm_tt, dma_ttm.ttm);
665	struct vmw_private *dev_priv = vmw_tt->dev_priv;
666	struct ttm_mem_global *glob = vmw_mem_glob(dev_priv);
667	int ret;
668
669	if (ttm->state != tt_unpopulated)
670		return 0;
671
672	if (dev_priv->map_mode == vmw_dma_alloc_coherent) {
673		size_t size =
674			ttm_round_pot(ttm->num_pages * sizeof(dma_addr_t));
675		ret = ttm_mem_global_alloc(glob, size, ctx);
676		if (unlikely(ret != 0))
677			return ret;
678
679		ret = ttm_dma_populate(&vmw_tt->dma_ttm, dev_priv->dev->dev,
680					ctx);
681		if (unlikely(ret != 0))
682			ttm_mem_global_free(glob, size);
683	} else
684		ret = ttm_pool_populate(ttm, ctx);
685
686	return ret;
687}
688
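   /**
    * vmw_ttm_unpopulate - Free the backing pages of a struct ttm_tt
    *
    * @ttm: Pointer to the struct ttm_tt to unpopulate.
    *
    * Destroys any attached MOB, tears down DMA mappings and returns the
    * pages to the pool they were allocated from.
    */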
689static void vmw_ttm_unpopulate(struct ttm_tt *ttm)
690{
691	struct vmw_ttm_tt *vmw_tt = container_of(ttm, struct vmw_ttm_tt,
692						 dma_ttm.ttm);
693	struct vmw_private *dev_priv = vmw_tt->dev_priv;
694	struct ttm_mem_global *glob = vmw_mem_glob(dev_priv);
695
696
697	if (vmw_tt->mob) {
698		vmw_mob_destroy(vmw_tt->mob);
699		vmw_tt->mob = NULL;
700	}
701
702	vmw_ttm_unmap_dma(vmw_tt);
703	if (dev_priv->map_mode == vmw_dma_alloc_coherent) {
704		size_t size =
705			ttm_round_pot(ttm->num_pages * sizeof(dma_addr_t));
706
707		ttm_dma_unpopulate(&vmw_tt->dma_ttm, dev_priv->dev->dev);
708		ttm_mem_global_free(glob, size);
709	} else
710		ttm_pool_unpopulate(ttm);
711}
712
713static struct ttm_backend_func vmw_ttm_func = {
714	.bind = vmw_ttm_bind,
715	.unbind = vmw_ttm_unbind,
716	.destroy = vmw_ttm_destroy,
717};
718
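   /**
    * vmw_ttm_tt_create - Create a struct vmw_ttm_tt for a buffer object
    *
    * @bo: Pointer to the buffer object the TTM will be attached to.
    * @page_flags: TTM page flags.
    *
    * Allocates and initializes the backend, using a DMA-aware TTM when
    * the driver runs in coherent mapping mode.
    */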
719static struct ttm_tt *vmw_ttm_tt_create(struct ttm_buffer_object *bo,
720					uint32_t page_flags)
721{
722	struct vmw_ttm_tt *vmw_be;
723	int ret;
724
725	vmw_be = kzalloc(sizeof(*vmw_be), GFP_KERNEL);
726	if (!vmw_be)
727		return NULL;
728
729	vmw_be->dma_ttm.ttm.func = &vmw_ttm_func;
730	vmw_be->dev_priv = container_of(bo->bdev, struct vmw_private, bdev);
731	vmw_be->mob = NULL;
732
733	if (vmw_be->dev_priv->map_mode == vmw_dma_alloc_coherent)
734		ret = ttm_dma_tt_init(&vmw_be->dma_ttm, bo, page_flags);
735	else
736		ret = ttm_tt_init(&vmw_be->dma_ttm.ttm, bo, page_flags);
737	if (unlikely(ret != 0))
738		goto out_no_init;
739
740	return &vmw_be->dma_ttm.ttm;
741out_no_init:
742	kfree(vmw_be);
743	return NULL;
744}
745
746static int vmw_invalidate_caches(struct ttm_bo_device *bdev, uint32_t flags)
747{
748	return 0;
749}
750
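   /**
    * vmw_init_mem_type - Initialize a TTM memory type manager
    *
    * @bdev: Pointer to the TTM bo device.
    * @type: The memory type to initialize.
    * @man: The memory type manager to initialize.
    *
    * Sets up the system, VRAM, GMR and MOB memory types. GMRs and MOBs
    * are handled by the gmrid manager.
    */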
751static int vmw_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
752		      struct ttm_mem_type_manager *man)
753{
754	switch (type) {
755	case TTM_PL_SYSTEM:
756		/* System memory */
757
758		man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
759		man->available_caching = TTM_PL_FLAG_CACHED;
760		man->default_caching = TTM_PL_FLAG_CACHED;
761		break;
762	case TTM_PL_VRAM:
763		/* "On-card" video ram */
764		man->func = &ttm_bo_manager_func;
765		man->gpu_offset = 0;
766		man->flags = TTM_MEMTYPE_FLAG_FIXED | TTM_MEMTYPE_FLAG_MAPPABLE;
767		man->available_caching = TTM_PL_FLAG_CACHED;
768		man->default_caching = TTM_PL_FLAG_CACHED;
769		break;
770	case VMW_PL_GMR:
771	case VMW_PL_MOB:
772		/*
773		 * "Guest Memory Regions" is an aperture-like feature with
774		 *  one slot per bo. There is an upper limit on the number of
775		 *  slots as well as on the bo size.
776		 */
777		man->func = &vmw_gmrid_manager_func;
778		man->gpu_offset = 0;
779		man->flags = TTM_MEMTYPE_FLAG_CMA | TTM_MEMTYPE_FLAG_MAPPABLE;
780		man->available_caching = TTM_PL_FLAG_CACHED;
781		man->default_caching = TTM_PL_FLAG_CACHED;
782		break;
783	default:
784		DRM_ERROR("Unsupported memory type %u\n", (unsigned)type);
785		return -EINVAL;
786	}
787	return 0;
788}
789
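   /**
    * vmw_evict_flags - Return the placement to evict a buffer object to
    *
    * @bo: The buffer object being evicted.
    * @placement: Filled in with the eviction placement.
    *
    * All evictions go to cached system memory.
    */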
790static void vmw_evict_flags(struct ttm_buffer_object *bo,
791		     struct ttm_placement *placement)
792{
793	*placement = vmw_sys_placement;
794}
795
796static int vmw_verify_access(struct ttm_buffer_object *bo, struct file *filp)
797{
798	struct ttm_object_file *tfile =
799		vmw_fpriv((struct drm_file *)filp->private_data)->tfile;
800
801	return vmw_user_dmabuf_verify_access(bo, tfile);
802}
803
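   /**
    * vmw_ttm_io_mem_reserve - Set up I/O memory info for a memory region
    *
    * @bdev: Pointer to the TTM bo device.
    * @mem: The memory region to set up.
    *
    * For VRAM placements, fills in the bus base and offset of the region
    * within the device's VRAM aperture. System, GMR and MOB placements
    * are not I/O mapped.
    */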
804static int vmw_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
805{
806	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
807	struct vmw_private *dev_priv = container_of(bdev, struct vmw_private, bdev);
808
809	mem->bus.addr = NULL;
810	mem->bus.is_iomem = false;
811	mem->bus.offset = 0;
812	mem->bus.size = mem->num_pages << PAGE_SHIFT;
813	mem->bus.base = 0;
814	if (!(man->flags & TTM_MEMTYPE_FLAG_MAPPABLE))
815		return -EINVAL;
816	switch (mem->mem_type) {
817	case TTM_PL_SYSTEM:
818	case VMW_PL_GMR:
819	case VMW_PL_MOB:
820		return 0;
821	case TTM_PL_VRAM:
822		mem->bus.offset = mem->start << PAGE_SHIFT;
823		mem->bus.base = dev_priv->vram_start;
824		mem->bus.is_iomem = true;
825		break;
826	default:
827		return -EINVAL;
828	}
829	return 0;
830}
831
832static void vmw_ttm_io_mem_free(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
833{
834}
835
836static int vmw_ttm_fault_reserve_notify(struct ttm_buffer_object *bo)
837{
838	return 0;
839}
840
841/**
842 * vmw_move_notify - TTM move_notify callback
843 *
844 * @bo: The TTM buffer object about to move.
845 * @mem: The struct ttm_mem_reg indicating to what memory
846 *       region the move is taking place.
847 *
848 * Calls move_notify for all subsystems needing it.
849 * (currently only resources).
850 */
851static void vmw_move_notify(struct ttm_buffer_object *bo,
852			    bool evict,
853			    struct ttm_mem_reg *mem)
854{
855	vmw_resource_move_notify(bo, mem);
856	vmw_query_move_notify(bo, mem);
857}
858
859
860/**
861 * vmw_swap_notify - TTM swap_notify callback
862 *
863 * @bo: The TTM buffer object about to be swapped out.
864 */
865static void vmw_swap_notify(struct ttm_buffer_object *bo)
866{
867	vmw_resource_swap_notify(bo);
868	(void) ttm_bo_wait(bo, false, false);
869}
870
871
872struct ttm_bo_driver vmw_bo_driver = {
873	.ttm_tt_create = &vmw_ttm_tt_create,
874	.ttm_tt_populate = &vmw_ttm_populate,
875	.ttm_tt_unpopulate = &vmw_ttm_unpopulate,
876	.invalidate_caches = vmw_invalidate_caches,
877	.init_mem_type = vmw_init_mem_type,
878	.eviction_valuable = ttm_bo_eviction_valuable,
879	.evict_flags = vmw_evict_flags,
880	.move = NULL,
881	.verify_access = vmw_verify_access,
882	.move_notify = vmw_move_notify,
883	.swap_notify = vmw_swap_notify,
884	.fault_reserve_notify = &vmw_ttm_fault_reserve_notify,
885	.io_mem_reserve = &vmw_ttm_io_mem_reserve,
886	.io_mem_free = &vmw_ttm_io_mem_free,
887};
v3.1
  1/**************************************************************************
  2 *
  3 * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
  4 * All Rights Reserved.
  5 *
  6 * Permission is hereby granted, free of charge, to any person obtaining a
  7 * copy of this software and associated documentation files (the
  8 * "Software"), to deal in the Software without restriction, including
  9 * without limitation the rights to use, copy, modify, merge, publish,
 10 * distribute, sub license, and/or sell copies of the Software, and to
 11 * permit persons to whom the Software is furnished to do so, subject to
 12 * the following conditions:
 13 *
 14 * The above copyright notice and this permission notice (including the
 15 * next paragraph) shall be included in all copies or substantial portions
 16 * of the Software.
 17 *
 18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 19 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 20 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 21 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 22 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 23 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 24 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 25 *
 26 **************************************************************************/
 27
 28#include "vmwgfx_drv.h"
 29#include "ttm/ttm_bo_driver.h"
 30#include "ttm/ttm_placement.h"
 31
 32static uint32_t vram_placement_flags = TTM_PL_FLAG_VRAM |
 33	TTM_PL_FLAG_CACHED;
 34
 35static uint32_t vram_ne_placement_flags = TTM_PL_FLAG_VRAM |
 36	TTM_PL_FLAG_CACHED |
 37	TTM_PL_FLAG_NO_EVICT;
 38
 39static uint32_t sys_placement_flags = TTM_PL_FLAG_SYSTEM |
 40	TTM_PL_FLAG_CACHED;
 41
 42static uint32_t gmr_placement_flags = VMW_PL_FLAG_GMR |
 43	TTM_PL_FLAG_CACHED;
 44
 45struct ttm_placement vmw_vram_placement = {
 46	.fpfn = 0,
 47	.lpfn = 0,
 48	.num_placement = 1,
 49	.placement = &vram_placement_flags,
 50	.num_busy_placement = 1,
 51	.busy_placement = &vram_placement_flags
 52};
 53
 54static uint32_t vram_gmr_placement_flags[] = {
 55	TTM_PL_FLAG_VRAM | TTM_PL_FLAG_CACHED,
 56	VMW_PL_FLAG_GMR | TTM_PL_FLAG_CACHED
 57};
 58
 59struct ttm_placement vmw_vram_gmr_placement = {
 60	.fpfn = 0,
 61	.lpfn = 0,
 62	.num_placement = 2,
 63	.placement = vram_gmr_placement_flags,
 64	.num_busy_placement = 1,
 65	.busy_placement = &gmr_placement_flags
 66};
 67
 68struct ttm_placement vmw_vram_sys_placement = {
 69	.fpfn = 0,
 70	.lpfn = 0,
 71	.num_placement = 1,
 72	.placement = &vram_placement_flags,
 73	.num_busy_placement = 1,
 74	.busy_placement = &sys_placement_flags
 75};
 76
 77struct ttm_placement vmw_vram_ne_placement = {
 78	.fpfn = 0,
 79	.lpfn = 0,
 80	.num_placement = 1,
 81	.placement = &vram_ne_placement_flags,
 82	.num_busy_placement = 1,
 83	.busy_placement = &vram_ne_placement_flags
 84};
 85
 86struct ttm_placement vmw_sys_placement = {
 87	.fpfn = 0,
 88	.lpfn = 0,
 89	.num_placement = 1,
 90	.placement = &sys_placement_flags,
 91	.num_busy_placement = 1,
 92	.busy_placement = &sys_placement_flags
 93};
 94
 95struct vmw_ttm_backend {
 96	struct ttm_backend backend;
 97	struct page **pages;
 98	unsigned long num_pages;
 99	struct vmw_private *dev_priv;
100	int gmr_id;
101};
102
103static int vmw_ttm_populate(struct ttm_backend *backend,
104			    unsigned long num_pages, struct page **pages,
105			    struct page *dummy_read_page,
106			    dma_addr_t *dma_addrs)
107{
108	struct vmw_ttm_backend *vmw_be =
109	    container_of(backend, struct vmw_ttm_backend, backend);
110
111	vmw_be->pages = pages;
112	vmw_be->num_pages = num_pages;
113
114	return 0;
115}
116
117static int vmw_ttm_bind(struct ttm_backend *backend, struct ttm_mem_reg *bo_mem)
118{
119	struct vmw_ttm_backend *vmw_be =
120	    container_of(backend, struct vmw_ttm_backend, backend);
121
122	vmw_be->gmr_id = bo_mem->start;
123
124	return vmw_gmr_bind(vmw_be->dev_priv, vmw_be->pages,
125			    vmw_be->num_pages, vmw_be->gmr_id);
126}
127
128static int vmw_ttm_unbind(struct ttm_backend *backend)
129{
130	struct vmw_ttm_backend *vmw_be =
131	    container_of(backend, struct vmw_ttm_backend, backend);
132
133	vmw_gmr_unbind(vmw_be->dev_priv, vmw_be->gmr_id);
134	return 0;
135}
136
137static void vmw_ttm_clear(struct ttm_backend *backend)
138{
139	struct vmw_ttm_backend *vmw_be =
140		container_of(backend, struct vmw_ttm_backend, backend);
141
142	vmw_be->pages = NULL;
143	vmw_be->num_pages = 0;
144}
145
146static void vmw_ttm_destroy(struct ttm_backend *backend)
147{
148	struct vmw_ttm_backend *vmw_be =
149	    container_of(backend, struct vmw_ttm_backend, backend);
150
151	kfree(vmw_be);
152}
153
154static struct ttm_backend_func vmw_ttm_func = {
155	.populate = vmw_ttm_populate,
156	.clear = vmw_ttm_clear,
157	.bind = vmw_ttm_bind,
158	.unbind = vmw_ttm_unbind,
159	.destroy = vmw_ttm_destroy,
160};
161
162struct ttm_backend *vmw_ttm_backend_init(struct ttm_bo_device *bdev)
163{
164	struct vmw_ttm_backend *vmw_be;
165
166	vmw_be = kmalloc(sizeof(*vmw_be), GFP_KERNEL);
167	if (!vmw_be)
168		return NULL;
169
170	vmw_be->backend.func = &vmw_ttm_func;
171	vmw_be->dev_priv = container_of(bdev, struct vmw_private, bdev);
172
173	return &vmw_be->backend;
174}
175
176int vmw_invalidate_caches(struct ttm_bo_device *bdev, uint32_t flags)
177{
178	return 0;
179}
180
181int vmw_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
182		      struct ttm_mem_type_manager *man)
183{
184	switch (type) {
185	case TTM_PL_SYSTEM:
186		/* System memory */
187
188		man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
189		man->available_caching = TTM_PL_FLAG_CACHED;
190		man->default_caching = TTM_PL_FLAG_CACHED;
191		break;
192	case TTM_PL_VRAM:
193		/* "On-card" video ram */
194		man->func = &ttm_bo_manager_func;
195		man->gpu_offset = 0;
196		man->flags = TTM_MEMTYPE_FLAG_FIXED | TTM_MEMTYPE_FLAG_MAPPABLE;
197		man->available_caching = TTM_PL_FLAG_CACHED;
198		man->default_caching = TTM_PL_FLAG_CACHED;
199		break;
200	case VMW_PL_GMR:
201		/*
202		 * "Guest Memory Regions" is an aperture-like feature with
203		 *  one slot per bo. There is an upper limit on the number of
204		 *  slots as well as on the bo size.
205		 */
206		man->func = &vmw_gmrid_manager_func;
207		man->gpu_offset = 0;
208		man->flags = TTM_MEMTYPE_FLAG_CMA | TTM_MEMTYPE_FLAG_MAPPABLE;
209		man->available_caching = TTM_PL_FLAG_CACHED;
210		man->default_caching = TTM_PL_FLAG_CACHED;
211		break;
212	default:
213		DRM_ERROR("Unsupported memory type %u\n", (unsigned)type);
214		return -EINVAL;
215	}
216	return 0;
217}
218
219void vmw_evict_flags(struct ttm_buffer_object *bo,
220		     struct ttm_placement *placement)
221{
222	*placement = vmw_sys_placement;
223}
224
225/**
226 * FIXME: Proper access checks on buffers.
227 */
228
229static int vmw_verify_access(struct ttm_buffer_object *bo, struct file *filp)
230{
231	return 0;
232}
233
234static int vmw_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
235{
236	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
237	struct vmw_private *dev_priv = container_of(bdev, struct vmw_private, bdev);
238
239	mem->bus.addr = NULL;
240	mem->bus.is_iomem = false;
241	mem->bus.offset = 0;
242	mem->bus.size = mem->num_pages << PAGE_SHIFT;
243	mem->bus.base = 0;
244	if (!(man->flags & TTM_MEMTYPE_FLAG_MAPPABLE))
245		return -EINVAL;
246	switch (mem->mem_type) {
247	case TTM_PL_SYSTEM:
248	case VMW_PL_GMR:
249		return 0;
250	case TTM_PL_VRAM:
251		mem->bus.offset = mem->start << PAGE_SHIFT;
252		mem->bus.base = dev_priv->vram_start;
253		mem->bus.is_iomem = true;
254		break;
255	default:
256		return -EINVAL;
257	}
258	return 0;
259}
260
261static void vmw_ttm_io_mem_free(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
262{
263}
264
265static int vmw_ttm_fault_reserve_notify(struct ttm_buffer_object *bo)
266{
267	return 0;
268}
269
270/**
271 * FIXME: We're using the old vmware polling method to sync.
272 * Do this with fences instead.
273 */
274
275static void *vmw_sync_obj_ref(void *sync_obj)
276{
277	return sync_obj;
278}
279
280static void vmw_sync_obj_unref(void **sync_obj)
281{
282	*sync_obj = NULL;
283}
284
285static int vmw_sync_obj_flush(void *sync_obj, void *sync_arg)
286{
287	struct vmw_private *dev_priv = (struct vmw_private *)sync_arg;
288
289	mutex_lock(&dev_priv->hw_mutex);
290	vmw_write(dev_priv, SVGA_REG_SYNC, SVGA_SYNC_GENERIC);
291	mutex_unlock(&dev_priv->hw_mutex);
292	return 0;
293}
294
295static bool vmw_sync_obj_signaled(void *sync_obj, void *sync_arg)
296{
297	struct vmw_private *dev_priv = (struct vmw_private *)sync_arg;
298	uint32_t sequence = (unsigned long) sync_obj;
299
300	return vmw_fence_signaled(dev_priv, sequence);
301}
302
303static int vmw_sync_obj_wait(void *sync_obj, void *sync_arg,
304			     bool lazy, bool interruptible)
305{
306	struct vmw_private *dev_priv = (struct vmw_private *)sync_arg;
307	uint32_t sequence = (unsigned long) sync_obj;
308
309	return vmw_wait_fence(dev_priv, false, sequence, false, 3*HZ);
310}
311
312struct ttm_bo_driver vmw_bo_driver = {
313	.create_ttm_backend_entry = vmw_ttm_backend_init,
314	.invalidate_caches = vmw_invalidate_caches,
315	.init_mem_type = vmw_init_mem_type,
316	.evict_flags = vmw_evict_flags,
317	.move = NULL,
318	.verify_access = vmw_verify_access,
319	.sync_obj_signaled = vmw_sync_obj_signaled,
320	.sync_obj_wait = vmw_sync_obj_wait,
321	.sync_obj_flush = vmw_sync_obj_flush,
322	.sync_obj_unref = vmw_sync_obj_unref,
323	.sync_obj_ref = vmw_sync_obj_ref,
324	.move_notify = NULL,
325	.swap_notify = NULL,
326	.fault_reserve_notify = &vmw_ttm_fault_reserve_notify,
327	.io_mem_reserve = &vmw_ttm_io_mem_reserve,
328	.io_mem_free = &vmw_ttm_io_mem_free,
329};