v4.6
  1/*
  2 * Copyright © 2008-2012 Intel Corporation
  3 *
  4 * Permission is hereby granted, free of charge, to any person obtaining a
  5 * copy of this software and associated documentation files (the "Software"),
  6 * to deal in the Software without restriction, including without limitation
  7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
  8 * and/or sell copies of the Software, and to permit persons to whom the
  9 * Software is furnished to do so, subject to the following conditions:
 10 *
 11 * The above copyright notice and this permission notice (including the next
 12 * paragraph) shall be included in all copies or substantial portions of the
 13 * Software.
 14 *
 15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 21 * IN THE SOFTWARE.
 22 *
 23 * Authors:
 24 *    Eric Anholt <eric@anholt.net>
 25 *    Chris Wilson <chris@chris-wilson.co.uk>
 26 *
 27 */
 28
 29#include <drm/drmP.h>
 30#include <drm/i915_drm.h>
 31#include "i915_drv.h"
 32
 33#define KB(x) ((x) * 1024)
 34#define MB(x) (KB(x) * 1024)
 35
 36/*
 37 * The BIOS typically reserves some of the system's memory for the exclusive
 38 * use of the integrated graphics. This memory is no longer available for
 39 * use by the OS and so the user finds that his system has less memory
 40 * available than he put in. We refer to this memory as stolen.
 41 *
 42 * The BIOS will allocate its framebuffer from the stolen memory. Our
 43 * goal is to try to reuse that object for our own fbcon, which must always
 44 * be available for panics. Anything else we can reuse the stolen memory
 45 * for is a boon.
 46 */
 47
 48int i915_gem_stolen_insert_node_in_range(struct drm_i915_private *dev_priv,
 49					 struct drm_mm_node *node, u64 size,
 50					 unsigned alignment, u64 start, u64 end)
 51{
 52	int ret;
 53
 54	if (!drm_mm_initialized(&dev_priv->mm.stolen))
 55		return -ENODEV;
 56
 57	/* See the comment at the drm_mm_init() call for more about this check.
 58	 * WaSkipStolenMemoryFirstPage:bdw,chv (incomplete) */
 59	if (INTEL_INFO(dev_priv)->gen == 8 && start < 4096)
 60		start = 4096;
 61
 62	mutex_lock(&dev_priv->mm.stolen_lock);
 63	ret = drm_mm_insert_node_in_range(&dev_priv->mm.stolen, node, size,
 64					  alignment, start, end,
 65					  DRM_MM_SEARCH_DEFAULT);
 66	mutex_unlock(&dev_priv->mm.stolen_lock);
 67
 68	return ret;
 69}
 70
 71int i915_gem_stolen_insert_node(struct drm_i915_private *dev_priv,
 72				struct drm_mm_node *node, u64 size,
 73				unsigned alignment)
 74{
 75	return i915_gem_stolen_insert_node_in_range(dev_priv, node, size,
 76					alignment, 0,
 77					dev_priv->gtt.stolen_usable_size);
 78}
 79
 80void i915_gem_stolen_remove_node(struct drm_i915_private *dev_priv,
 81				 struct drm_mm_node *node)
 82{
 83	mutex_lock(&dev_priv->mm.stolen_lock);
 84	drm_mm_remove_node(node);
 85	mutex_unlock(&dev_priv->mm.stolen_lock);
 86}
 87
 88static unsigned long i915_stolen_to_physical(struct drm_device *dev)
 89{
 90	struct drm_i915_private *dev_priv = dev->dev_private;
 91	struct resource *r;
 92	u32 base;
 93
 94	/* Almost universally we can find the Graphics Base of Stolen Memory
 95	 * at offset 0x5c in the igfx configuration space. On a few (desktop)
 96	 * machines this is also mirrored in the bridge device at different
 97	 * locations, or in the MCHBAR.
 98	 *
 99	 * On 865 we just check the TOUD register.
100	 *
101	 * On 830/845/85x the stolen memory base isn't available in any
102	 * register. We need to calculate it as TOM-TSEG_SIZE-stolen_size.
103	 *
104	 */
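	/*
	 * E.g. (hypothetical numbers) for the 830/845/85x formula below:
	 * with TOM = 256 MB, a 1 MB TSEG and 8 MB of stolen memory,
	 * base = 256M - 1M - 8M = 0x0f700000.
	 */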
105	base = 0;
106	if (INTEL_INFO(dev)->gen >= 3) {
107		/* Read Graphics Base of Stolen Memory directly */
108		pci_read_config_dword(dev->pdev, 0x5c, &base);
109		base &= ~((1<<20) - 1);
110	} else if (IS_I865G(dev)) {
111		u16 toud = 0;
112
113		/*
114		 * FIXME is the graphics stolen memory region
115		 * always at TOUD? I.e. is it always the last
116		 * one to be allocated by the BIOS?
117		 */
118		pci_bus_read_config_word(dev->pdev->bus, PCI_DEVFN(0, 0),
119					 I865_TOUD, &toud);
120
121		base = toud << 16;
122	} else if (IS_I85X(dev)) {
123		u32 tseg_size = 0;
124		u32 tom;
125		u8 tmp;
126
127		pci_bus_read_config_byte(dev->pdev->bus, PCI_DEVFN(0, 0),
128					 I85X_ESMRAMC, &tmp);
129
130		if (tmp & TSEG_ENABLE)
131			tseg_size = MB(1);
132
133		pci_bus_read_config_byte(dev->pdev->bus, PCI_DEVFN(0, 1),
134					 I85X_DRB3, &tmp);
135		tom = tmp * MB(32);
136
137		base = tom - tseg_size - dev_priv->gtt.stolen_size;
138	} else if (IS_845G(dev)) {
139		u32 tseg_size = 0;
140		u32 tom;
141		u8 tmp;
142
143		pci_bus_read_config_byte(dev->pdev->bus, PCI_DEVFN(0, 0),
144					 I845_ESMRAMC, &tmp);
145
146		if (tmp & TSEG_ENABLE) {
147			switch (tmp & I845_TSEG_SIZE_MASK) {
148			case I845_TSEG_SIZE_512K:
149				tseg_size = KB(512);
150				break;
151			case I845_TSEG_SIZE_1M:
152				tseg_size = MB(1);
153				break;
154			}
155		}
156
157		pci_bus_read_config_byte(dev->pdev->bus, PCI_DEVFN(0, 0),
158					 I830_DRB3, &tmp);
159		tom = tmp * MB(32);
160
161		base = tom - tseg_size - dev_priv->gtt.stolen_size;
162	} else if (IS_I830(dev)) {
163		u32 tseg_size = 0;
164		u32 tom;
165		u8 tmp;
166
167		pci_bus_read_config_byte(dev->pdev->bus, PCI_DEVFN(0, 0),
168					 I830_ESMRAMC, &tmp);
169
170		if (tmp & TSEG_ENABLE) {
171			if (tmp & I830_TSEG_SIZE_1M)
172				tseg_size = MB(1);
173			else
174				tseg_size = KB(512);
175		}
176
177		pci_bus_read_config_byte(dev->pdev->bus, PCI_DEVFN(0, 0),
178					 I830_DRB3, &tmp);
179		tom = tmp * MB(32);
180
181		base = tom - tseg_size - dev_priv->gtt.stolen_size;
182	}
183
184	if (base == 0)
185		return 0;
186
187	/* make sure we don't clobber the GTT if it's within stolen memory */
188	if (INTEL_INFO(dev)->gen <= 4 && !IS_G33(dev) && !IS_G4X(dev)) {
189		struct {
190			u32 start, end;
191		} stolen[2] = {
192			{ .start = base, .end = base + dev_priv->gtt.stolen_size, },
193			{ .start = base, .end = base + dev_priv->gtt.stolen_size, },
194		};
195		u64 gtt_start, gtt_end;
196
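	/*
	 * Note: PGTBL_CTL packs the GTT base differently per gen. On gen4
	 * the field selected by PGTBL_ADDRESS_HI_MASK, once shifted left
	 * by 28, contributes address bits above bit 31, so the base can
	 * sit beyond 4 GiB; earlier gens carry only the low field.
	 */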
197		gtt_start = I915_READ(PGTBL_CTL);
198		if (IS_GEN4(dev))
199			gtt_start = (gtt_start & PGTBL_ADDRESS_LO_MASK) |
200				(gtt_start & PGTBL_ADDRESS_HI_MASK) << 28;
201		else
202			gtt_start &= PGTBL_ADDRESS_LO_MASK;
203		gtt_end = gtt_start + gtt_total_entries(dev_priv->gtt) * 4;
204
205		if (gtt_start >= stolen[0].start && gtt_start < stolen[0].end)
206			stolen[0].end = gtt_start;
207		if (gtt_end > stolen[1].start && gtt_end <= stolen[1].end)
208			stolen[1].start = gtt_end;
209
210		/* pick the larger of the two chunks */
211		if (stolen[0].end - stolen[0].start >
212		    stolen[1].end - stolen[1].start) {
213			base = stolen[0].start;
214			dev_priv->gtt.stolen_size = stolen[0].end - stolen[0].start;
215		} else {
216			base = stolen[1].start;
217			dev_priv->gtt.stolen_size = stolen[1].end - stolen[1].start;
218		}
219
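	/*
	 * E.g. if the GTT table starts right at the stolen base, chunk
	 * stolen[0] collapses to zero length and stolen[1] (everything
	 * above the table) is what survives as the adjusted region.
	 */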
220		if (stolen[0].start != stolen[1].start ||
221		    stolen[0].end != stolen[1].end) {
222			DRM_DEBUG_KMS("GTT within stolen memory at 0x%llx-0x%llx\n",
223				      (unsigned long long) gtt_start,
224				      (unsigned long long) gtt_end - 1);
225			DRM_DEBUG_KMS("Stolen memory adjusted to 0x%x-0x%x\n",
226				      base, base + (u32) dev_priv->gtt.stolen_size - 1);
227		}
228	}
229
230
231	/* Verify that nothing else uses this physical address. Stolen
232	 * memory should be reserved by the BIOS and hidden from the
233	 * kernel. So if the region is already marked as busy, something
234	 * is seriously wrong.
235	 */
236	r = devm_request_mem_region(dev->dev, base, dev_priv->gtt.stolen_size,
237				    "Graphics Stolen Memory");
238	if (r == NULL) {
239		/*
240		 * One more attempt but this time requesting region from
241		 * base + 1, as we have seen that this resolves the region
242		 * conflict with the PCI Bus.
243		 * This is a BIOS w/a: Some BIOS wrap stolen in the root
244		 * PCI bus, but have an off-by-one error. Hence retry the
245		 * reservation starting from 1 instead of 0.
246		 */
247		r = devm_request_mem_region(dev->dev, base + 1,
248					    dev_priv->gtt.stolen_size - 1,
249					    "Graphics Stolen Memory");
250		/*
251		 * GEN3 firmware likes to smash pci bridges into the stolen
252		 * range. Apparently this works.
253		 */
254		if (r == NULL && !IS_GEN3(dev)) {
255			DRM_ERROR("conflict detected with stolen region: [0x%08x - 0x%08x]\n",
256				  base, base + (uint32_t)dev_priv->gtt.stolen_size);
257			base = 0;
258		}
259	}
260
261	return base;
262}
263
264void i915_gem_cleanup_stolen(struct drm_device *dev)
265{
266	struct drm_i915_private *dev_priv = dev->dev_private;
267
268	if (!drm_mm_initialized(&dev_priv->mm.stolen))
269		return;
270
271	drm_mm_takedown(&dev_priv->mm.stolen);
272}
273
274static void g4x_get_stolen_reserved(struct drm_i915_private *dev_priv,
275				    unsigned long *base, unsigned long *size)
276{
277	uint32_t reg_val = I915_READ(IS_GM45(dev_priv) ?
278				     CTG_STOLEN_RESERVED :
279				     ELK_STOLEN_RESERVED);
280	unsigned long stolen_top = dev_priv->mm.stolen_base +
281		dev_priv->gtt.stolen_size;
282
283	*base = (reg_val & G4X_STOLEN_RESERVED_ADDR2_MASK) << 16;
284
285	WARN_ON((reg_val & G4X_STOLEN_RESERVED_ADDR1_MASK) < *base);
286
287	/* On these platforms, the register doesn't have a size field, so the
288	 * size is the distance between the base and the top of the stolen
289	 * memory. We also have the genuine case where base is zero and there's
290	 * nothing reserved. */
291	if (*base == 0)
292		*size = 0;
293	else
294		*size = stolen_top - *base;
295}
296
297static void gen6_get_stolen_reserved(struct drm_i915_private *dev_priv,
298				     unsigned long *base, unsigned long *size)
299{
300	uint32_t reg_val = I915_READ(GEN6_STOLEN_RESERVED);
301
302	*base = reg_val & GEN6_STOLEN_RESERVED_ADDR_MASK;
303
304	switch (reg_val & GEN6_STOLEN_RESERVED_SIZE_MASK) {
305	case GEN6_STOLEN_RESERVED_1M:
306		*size = 1024 * 1024;
307		break;
308	case GEN6_STOLEN_RESERVED_512K:
309		*size = 512 * 1024;
310		break;
311	case GEN6_STOLEN_RESERVED_256K:
312		*size = 256 * 1024;
313		break;
314	case GEN6_STOLEN_RESERVED_128K:
315		*size = 128 * 1024;
316		break;
317	default:
318		*size = 1024 * 1024;
319		MISSING_CASE(reg_val & GEN6_STOLEN_RESERVED_SIZE_MASK);
320	}
321}
322
323static void gen7_get_stolen_reserved(struct drm_i915_private *dev_priv,
324				     unsigned long *base, unsigned long *size)
325{
326	uint32_t reg_val = I915_READ(GEN6_STOLEN_RESERVED);
327
328	*base = reg_val & GEN7_STOLEN_RESERVED_ADDR_MASK;
329
330	switch (reg_val & GEN7_STOLEN_RESERVED_SIZE_MASK) {
331	case GEN7_STOLEN_RESERVED_1M:
332		*size = 1024 * 1024;
333		break;
334	case GEN7_STOLEN_RESERVED_256K:
335		*size = 256 * 1024;
336		break;
337	default:
338		*size = 1024 * 1024;
339		MISSING_CASE(reg_val & GEN7_STOLEN_RESERVED_SIZE_MASK);
340	}
341}
342
343static void gen8_get_stolen_reserved(struct drm_i915_private *dev_priv,
344				     unsigned long *base, unsigned long *size)
345{
346	uint32_t reg_val = I915_READ(GEN6_STOLEN_RESERVED);
347
348	*base = reg_val & GEN6_STOLEN_RESERVED_ADDR_MASK;
349
350	switch (reg_val & GEN8_STOLEN_RESERVED_SIZE_MASK) {
351	case GEN8_STOLEN_RESERVED_1M:
352		*size = 1024 * 1024;
353		break;
354	case GEN8_STOLEN_RESERVED_2M:
355		*size = 2 * 1024 * 1024;
356		break;
357	case GEN8_STOLEN_RESERVED_4M:
358		*size = 4 * 1024 * 1024;
359		break;
360	case GEN8_STOLEN_RESERVED_8M:
361		*size = 8 * 1024 * 1024;
362		break;
363	default:
364		*size = 8 * 1024 * 1024;
365		MISSING_CASE(reg_val & GEN8_STOLEN_RESERVED_SIZE_MASK);
366	}
367}
368
369static void bdw_get_stolen_reserved(struct drm_i915_private *dev_priv,
370				    unsigned long *base, unsigned long *size)
371{
372	uint32_t reg_val = I915_READ(GEN6_STOLEN_RESERVED);
373	unsigned long stolen_top;
374
375	stolen_top = dev_priv->mm.stolen_base + dev_priv->gtt.stolen_size;
376
377	*base = reg_val & GEN6_STOLEN_RESERVED_ADDR_MASK;
378
379	/* On these platforms, the register doesn't have a size field, so the
380	 * size is the distance between the base and the top of the stolen
381	 * memory. We also have the genuine case where base is zero and there's
382	 * nothing reserved. */
383	if (*base == 0)
384		*size = 0;
385	else
386		*size = stolen_top - *base;
387}
388
389int i915_gem_init_stolen(struct drm_device *dev)
390{
391	struct drm_i915_private *dev_priv = dev->dev_private;
392	unsigned long reserved_total, reserved_base = 0, reserved_size;
393	unsigned long stolen_top;
394
395	mutex_init(&dev_priv->mm.stolen_lock);
396
397#ifdef CONFIG_INTEL_IOMMU
398	if (intel_iommu_gfx_mapped && INTEL_INFO(dev)->gen < 8) {
399		DRM_INFO("DMAR active, disabling use of stolen memory\n");
400		return 0;
401	}
402#endif
403
404	if (dev_priv->gtt.stolen_size == 0)
405		return 0;
406
407	dev_priv->mm.stolen_base = i915_stolen_to_physical(dev);
408	if (dev_priv->mm.stolen_base == 0)
409		return 0;
410
411	stolen_top = dev_priv->mm.stolen_base + dev_priv->gtt.stolen_size;
412
413	switch (INTEL_INFO(dev_priv)->gen) {
414	case 2:
415	case 3:
416		break;
417	case 4:
418		if (IS_G4X(dev))
419			g4x_get_stolen_reserved(dev_priv, &reserved_base,
420						&reserved_size);
421		break;
422	case 5:
423		/* Assume the gen6 maximum for the older platforms. */
424		reserved_size = 1024 * 1024;
425		reserved_base = stolen_top - reserved_size;
426		break;
427	case 6:
428		gen6_get_stolen_reserved(dev_priv, &reserved_base,
429					 &reserved_size);
430		break;
431	case 7:
432		gen7_get_stolen_reserved(dev_priv, &reserved_base,
433					 &reserved_size);
434		break;
435	default:
436		if (IS_BROADWELL(dev_priv) ||
437		    IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev))
438			bdw_get_stolen_reserved(dev_priv, &reserved_base,
439						&reserved_size);
440		else
441			gen8_get_stolen_reserved(dev_priv, &reserved_base,
442						 &reserved_size);
443		break;
444	}
445
446	/* It is possible for the reserved base to be zero, but the register
447	 * field for size doesn't have a zero option. */
448	if (reserved_base == 0) {
449		reserved_size = 0;
450		reserved_base = stolen_top;
451	}
452
453	if (reserved_base < dev_priv->mm.stolen_base ||
454	    reserved_base + reserved_size > stolen_top) {
455		DRM_DEBUG_KMS("Stolen reserved area [0x%08lx - 0x%08lx] outside stolen memory [0x%08lx - 0x%08lx]\n",
456			      reserved_base, reserved_base + reserved_size,
457			      dev_priv->mm.stolen_base, stolen_top);
458		return 0;
459	}
460
461	dev_priv->gtt.stolen_reserved_base = reserved_base;
462	dev_priv->gtt.stolen_reserved_size = reserved_size;
463
464	/* It is possible for the reserved area to end before the end of stolen
465	 * memory, so just consider the start. */
466	reserved_total = stolen_top - reserved_base;
467
468	DRM_DEBUG_KMS("Memory reserved for graphics device: %zuK, usable: %luK\n",
469		      dev_priv->gtt.stolen_size >> 10,
470		      (dev_priv->gtt.stolen_size - reserved_total) >> 10);
471
472	dev_priv->gtt.stolen_usable_size = dev_priv->gtt.stolen_size -
473					   reserved_total;
474
475	/*
476	 * Basic memrange allocator for stolen space.
477	 *
478	 * TODO: Notice that some platforms require us to not use the first page
479	 * of the stolen memory but their BIOSes may still put the framebuffer
480	 * on the first page. So we don't reserve this page for now because of
481	 * that. Our current solution is to just prevent new nodes from being
482	 * inserted on the first page - see the check we have at
483	 * i915_gem_stolen_insert_node_in_range(). We may want to fix the fbcon
484	 * problem later.
485	 */
486	drm_mm_init(&dev_priv->mm.stolen, 0, dev_priv->gtt.stolen_usable_size);
487
488	return 0;
489}
490
491static struct sg_table *
492i915_pages_create_for_stolen(struct drm_device *dev,
493			     u32 offset, u32 size)
494{
495	struct drm_i915_private *dev_priv = dev->dev_private;
496	struct sg_table *st;
497	struct scatterlist *sg;
498
499	DRM_DEBUG_DRIVER("offset=0x%x, size=%d\n", offset, size);
500	BUG_ON(offset > dev_priv->gtt.stolen_size - size);
501
502	/* We hide that we have no struct page backing our stolen object
503	 * by wrapping the contiguous physical allocation with a fake
504	 * dma mapping in a single scatterlist.
505	 */
506
507	st = kmalloc(sizeof(*st), GFP_KERNEL);
508	if (st == NULL)
509		return NULL;
510
511	if (sg_alloc_table(st, 1, GFP_KERNEL)) {
512		kfree(st);
513		return NULL;
514	}
515
516	sg = st->sgl;
517	sg->offset = 0;
518	sg->length = size;
519
520	sg_dma_address(sg) = (dma_addr_t)dev_priv->mm.stolen_base + offset;
521	sg_dma_len(sg) = size;
522
523	return st;
524}
525
526static int i915_gem_object_get_pages_stolen(struct drm_i915_gem_object *obj)
527{
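	/* Stolen objects build their fake sg_table at creation time (see
	 * _i915_gem_object_create_stolen()) and keep it pinned, so this
	 * hook should never be reached.
	 */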
528	BUG();
529	return -EINVAL;
530}
531
532static void i915_gem_object_put_pages_stolen(struct drm_i915_gem_object *obj)
533{
534	/* Should only be called during free */
535	sg_free_table(obj->pages);
536	kfree(obj->pages);
537}
538
539
540static void
541i915_gem_object_release_stolen(struct drm_i915_gem_object *obj)
542{
543	struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
544
545	if (obj->stolen) {
546		i915_gem_stolen_remove_node(dev_priv, obj->stolen);
547		kfree(obj->stolen);
548		obj->stolen = NULL;
549	}
550}
551static const struct drm_i915_gem_object_ops i915_gem_object_stolen_ops = {
552	.get_pages = i915_gem_object_get_pages_stolen,
553	.put_pages = i915_gem_object_put_pages_stolen,
554	.release = i915_gem_object_release_stolen,
555};
556
557static struct drm_i915_gem_object *
558_i915_gem_object_create_stolen(struct drm_device *dev,
559			       struct drm_mm_node *stolen)
560{
561	struct drm_i915_gem_object *obj;
562
563	obj = i915_gem_object_alloc(dev);
564	if (obj == NULL)
565		return NULL;
566
567	drm_gem_private_object_init(dev, &obj->base, stolen->size);
568	i915_gem_object_init(obj, &i915_gem_object_stolen_ops);
569
570	obj->pages = i915_pages_create_for_stolen(dev,
571						  stolen->start, stolen->size);
572	if (obj->pages == NULL)
573		goto cleanup;
574
575	obj->get_page.sg = obj->pages->sgl;
576	obj->get_page.last = 0;
577
578	i915_gem_object_pin_pages(obj);
579	obj->stolen = stolen;
580
581	obj->base.read_domains = I915_GEM_DOMAIN_CPU | I915_GEM_DOMAIN_GTT;
582	obj->cache_level = HAS_LLC(dev) ? I915_CACHE_LLC : I915_CACHE_NONE;
583
584	return obj;
585
586cleanup:
587	i915_gem_object_free(obj);
588	return NULL;
589}
590
591struct drm_i915_gem_object *
592i915_gem_object_create_stolen(struct drm_device *dev, u32 size)
593{
594	struct drm_i915_private *dev_priv = dev->dev_private;
595	struct drm_i915_gem_object *obj;
596	struct drm_mm_node *stolen;
597	int ret;
598
599	if (!drm_mm_initialized(&dev_priv->mm.stolen))
600		return NULL;
601
602	DRM_DEBUG_KMS("creating stolen object: size=%x\n", size);
603	if (size == 0)
604		return NULL;
605
606	stolen = kzalloc(sizeof(*stolen), GFP_KERNEL);
607	if (!stolen)
608		return NULL;
609
610	ret = i915_gem_stolen_insert_node(dev_priv, stolen, size, 4096);
611	if (ret) {
612		kfree(stolen);
613		return NULL;
614	}
615
616	obj = _i915_gem_object_create_stolen(dev, stolen);
617	if (obj)
618		return obj;
619
620	i915_gem_stolen_remove_node(dev_priv, stolen);
621	kfree(stolen);
622	return NULL;
623}
624
625struct drm_i915_gem_object *
626i915_gem_object_create_stolen_for_preallocated(struct drm_device *dev,
627					       u32 stolen_offset,
628					       u32 gtt_offset,
629					       u32 size)
630{
631	struct drm_i915_private *dev_priv = dev->dev_private;
632	struct i915_address_space *ggtt = &dev_priv->gtt.base;
633	struct drm_i915_gem_object *obj;
634	struct drm_mm_node *stolen;
635	struct i915_vma *vma;
636	int ret;
637
638	if (!drm_mm_initialized(&dev_priv->mm.stolen))
639		return NULL;
640
641	lockdep_assert_held(&dev->struct_mutex);
642
643	DRM_DEBUG_KMS("creating preallocated stolen object: stolen_offset=%x, gtt_offset=%x, size=%x\n",
644			stolen_offset, gtt_offset, size);
645
646	/* KISS and expect everything to be page-aligned */
647	if (WARN_ON(size == 0) || WARN_ON(size & 4095) ||
648	    WARN_ON(stolen_offset & 4095))
649		return NULL;
650
651	stolen = kzalloc(sizeof(*stolen), GFP_KERNEL);
652	if (!stolen)
653		return NULL;
654
655	stolen->start = stolen_offset;
656	stolen->size = size;
657	mutex_lock(&dev_priv->mm.stolen_lock);
658	ret = drm_mm_reserve_node(&dev_priv->mm.stolen, stolen);
659	mutex_unlock(&dev_priv->mm.stolen_lock);
660	if (ret) {
661		DRM_DEBUG_KMS("failed to allocate stolen space\n");
662		kfree(stolen);
663		return NULL;
664	}
665
666	obj = _i915_gem_object_create_stolen(dev, stolen);
667	if (obj == NULL) {
668		DRM_DEBUG_KMS("failed to allocate stolen object\n");
669		i915_gem_stolen_remove_node(dev_priv, stolen);
670		kfree(stolen);
671		return NULL;
672	}
673
674	/* Some objects just need physical mem from stolen space */
675	if (gtt_offset == I915_GTT_OFFSET_NONE)
676		return obj;
677
678	vma = i915_gem_obj_lookup_or_create_vma(obj, ggtt);
679	if (IS_ERR(vma)) {
680		ret = PTR_ERR(vma);
681		goto err;
682	}
683
684	/* To simplify the initialisation sequence between KMS and GTT,
685	 * we allow construction of the stolen object prior to
686	 * setting up the GTT space. The actual reservation will occur
687	 * later.
688	 */
689	vma->node.start = gtt_offset;
690	vma->node.size = size;
691	if (drm_mm_initialized(&ggtt->mm)) {
692		ret = drm_mm_reserve_node(&ggtt->mm, &vma->node);
693		if (ret) {
694			DRM_DEBUG_KMS("failed to allocate stolen GTT space\n");
695			goto err;
696		}
697
698		vma->bound |= GLOBAL_BIND;
699		__i915_vma_set_map_and_fenceable(vma);
700		list_add_tail(&vma->vm_link, &ggtt->inactive_list);
701	}
702
703	list_add_tail(&obj->global_list, &dev_priv->mm.bound_list);
704	i915_gem_object_pin_pages(obj);
705
706	return obj;
707
708err:
709	drm_gem_object_unreference(&obj->base);
710	return NULL;
711}
v4.10.11
  1/*
  2 * Copyright © 2008-2012 Intel Corporation
  3 *
  4 * Permission is hereby granted, free of charge, to any person obtaining a
  5 * copy of this software and associated documentation files (the "Software"),
  6 * to deal in the Software without restriction, including without limitation
  7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
  8 * and/or sell copies of the Software, and to permit persons to whom the
  9 * Software is furnished to do so, subject to the following conditions:
 10 *
 11 * The above copyright notice and this permission notice (including the next
 12 * paragraph) shall be included in all copies or substantial portions of the
 13 * Software.
 14 *
 15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 21 * IN THE SOFTWARE.
 22 *
 23 * Authors:
 24 *    Eric Anholt <eric@anholt.net>
 25 *    Chris Wilson <chris@chris-wilson.co.uk>
 26 *
 27 */
 28
 29#include <drm/drmP.h>
 30#include <drm/i915_drm.h>
 31#include "i915_drv.h"
 32
 33#define KB(x) ((x) * 1024)
 34#define MB(x) (KB(x) * 1024)
 35
 36/*
 37 * The BIOS typically reserves some of the system's memory for the exclusive
 38 * use of the integrated graphics. This memory is no longer available for
 39 * use by the OS and so the user finds that his system has less memory
 40 * available than he put in. We refer to this memory as stolen.
 41 *
 42 * The BIOS will allocate its framebuffer from the stolen memory. Our
 43 * goal is to try to reuse that object for our own fbcon, which must always
 44 * be available for panics. Anything else we can reuse the stolen memory
 45 * for is a boon.
 46 */
 47
 48int i915_gem_stolen_insert_node_in_range(struct drm_i915_private *dev_priv,
 49					 struct drm_mm_node *node, u64 size,
 50					 unsigned alignment, u64 start, u64 end)
 51{
 52	int ret;
 53
 54	if (!drm_mm_initialized(&dev_priv->mm.stolen))
 55		return -ENODEV;
 56
 57	/* See the comment at the drm_mm_init() call for more about this check.
 58	 * WaSkipStolenMemoryFirstPage:bdw+ (incomplete)
 59	 */
 60	if (start < 4096 && INTEL_GEN(dev_priv) >= 8)
 61		start = 4096;
 62
 63	mutex_lock(&dev_priv->mm.stolen_lock);
 64	ret = drm_mm_insert_node_in_range(&dev_priv->mm.stolen, node, size,
 65					  alignment, start, end,
 66					  DRM_MM_SEARCH_DEFAULT);
 67	mutex_unlock(&dev_priv->mm.stolen_lock);
 68
 69	return ret;
 70}
 71
 72int i915_gem_stolen_insert_node(struct drm_i915_private *dev_priv,
 73				struct drm_mm_node *node, u64 size,
 74				unsigned alignment)
 75{
 76	struct i915_ggtt *ggtt = &dev_priv->ggtt;
 77
 78	return i915_gem_stolen_insert_node_in_range(dev_priv, node, size,
 79						    alignment, 0,
 80						    ggtt->stolen_usable_size);
 81}
 82
 83void i915_gem_stolen_remove_node(struct drm_i915_private *dev_priv,
 84				 struct drm_mm_node *node)
 85{
 86	mutex_lock(&dev_priv->mm.stolen_lock);
 87	drm_mm_remove_node(node);
 88	mutex_unlock(&dev_priv->mm.stolen_lock);
 89}
 90
 91static unsigned long i915_stolen_to_physical(struct drm_i915_private *dev_priv)
 92{
 93	struct pci_dev *pdev = dev_priv->drm.pdev;
 94	struct i915_ggtt *ggtt = &dev_priv->ggtt;
 95	struct resource *r;
 96	u32 base;
 97
 98	/* Almost universally we can find the Graphics Base of Stolen Memory
 99	 * at register BSM (0x5c) in the igfx configuration space. On a few
100	 * (desktop) machines this is also mirrored in the bridge device at
101	 * different locations, or in the MCHBAR.
102	 *
103	 * On 865 we just check the TOUD register.
104	 *
105	 * On 830/845/85x the stolen memory base isn't available in any
106	 * register. We need to calculate it as TOM-TSEG_SIZE-stolen_size.
107	 *
108	 */
109	base = 0;
110	if (INTEL_GEN(dev_priv) >= 3) {
111		u32 bsm;
112
113		pci_read_config_dword(pdev, INTEL_BSM, &bsm);
114
115		base = bsm & INTEL_BSM_MASK;
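		/*
		 * INTEL_BSM_MASK keeps only the 1 MiB-aligned base bits,
		 * matching v4.6's open-coded base &= ~((1<<20) - 1); the
		 * masked-off low bits of BSM are not address bits. For
		 * reference, the raw register can also be dumped from
		 * userspace, e.g. with "setpci -s 00:02.0 5c.l" (assuming
		 * the igfx device sits at 00:02.0).
		 */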
116	} else if (IS_I865G(dev_priv)) {
117		u32 tseg_size = 0;
118		u16 toud = 0;
119		u8 tmp;
120
121		pci_bus_read_config_byte(pdev->bus, PCI_DEVFN(0, 0),
122					 I845_ESMRAMC, &tmp);
123
124		if (tmp & TSEG_ENABLE) {
125			switch (tmp & I845_TSEG_SIZE_MASK) {
126			case I845_TSEG_SIZE_512K:
127				tseg_size = KB(512);
128				break;
129			case I845_TSEG_SIZE_1M:
130				tseg_size = MB(1);
131				break;
132			}
133		}
134
135		pci_bus_read_config_word(pdev->bus, PCI_DEVFN(0, 0),
136					 I865_TOUD, &toud);
137
138		base = (toud << 16) + tseg_size;
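		/*
		 * TOUD is a 16-bit value in 64 KiB units, hence the shift.
		 * Unlike the v4.6 code, this version assumes TSEG sits
		 * directly above TOUD, so stolen memory begins at
		 * TOUD + TSEG size.
		 */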
139	} else if (IS_I85X(dev_priv)) {
140		u32 tseg_size = 0;
141		u32 tom;
142		u8 tmp;
143
144		pci_bus_read_config_byte(pdev->bus, PCI_DEVFN(0, 0),
145					 I85X_ESMRAMC, &tmp);
146
147		if (tmp & TSEG_ENABLE)
148			tseg_size = MB(1);
149
150		pci_bus_read_config_byte(pdev->bus, PCI_DEVFN(0, 1),
151					 I85X_DRB3, &tmp);
152		tom = tmp * MB(32);
153
154		base = tom - tseg_size - ggtt->stolen_size;
155	} else if (IS_845G(dev_priv)) {
156		u32 tseg_size = 0;
157		u32 tom;
158		u8 tmp;
159
160		pci_bus_read_config_byte(pdev->bus, PCI_DEVFN(0, 0),
161					 I845_ESMRAMC, &tmp);
162
163		if (tmp & TSEG_ENABLE) {
164			switch (tmp & I845_TSEG_SIZE_MASK) {
165			case I845_TSEG_SIZE_512K:
166				tseg_size = KB(512);
167				break;
168			case I845_TSEG_SIZE_1M:
169				tseg_size = MB(1);
170				break;
171			}
172		}
173
174		pci_bus_read_config_byte(pdev->bus, PCI_DEVFN(0, 0),
175					 I830_DRB3, &tmp);
176		tom = tmp * MB(32);
177
178		base = tom - tseg_size - ggtt->stolen_size;
179	} else if (IS_I830(dev_priv)) {
180		u32 tseg_size = 0;
181		u32 tom;
182		u8 tmp;
183
184		pci_bus_read_config_byte(pdev->bus, PCI_DEVFN(0, 0),
185					 I830_ESMRAMC, &tmp);
186
187		if (tmp & TSEG_ENABLE) {
188			if (tmp & I830_TSEG_SIZE_1M)
189				tseg_size = MB(1);
190			else
191				tseg_size = KB(512);
192		}
193
194		pci_bus_read_config_byte(pdev->bus, PCI_DEVFN(0, 0),
195					 I830_DRB3, &tmp);
196		tom = tmp * MB(32);
197
198		base = tom - tseg_size - ggtt->stolen_size;
199	}
200
201	if (base == 0)
202		return 0;
203
204	/* make sure we don't clobber the GTT if it's within stolen memory */
205	if (INTEL_GEN(dev_priv) <= 4 && !IS_G33(dev_priv) &&
206	    !IS_G4X(dev_priv)) {
207		struct {
208			u32 start, end;
209		} stolen[2] = {
210			{ .start = base, .end = base + ggtt->stolen_size, },
211			{ .start = base, .end = base + ggtt->stolen_size, },
212		};
213		u64 ggtt_start, ggtt_end;
214
215		ggtt_start = I915_READ(PGTBL_CTL);
216		if (IS_GEN4(dev_priv))
217			ggtt_start = (ggtt_start & PGTBL_ADDRESS_LO_MASK) |
218				     (ggtt_start & PGTBL_ADDRESS_HI_MASK) << 28;
219		else
220			ggtt_start &= PGTBL_ADDRESS_LO_MASK;
221		ggtt_end = ggtt_start + ggtt_total_entries(ggtt) * 4;
222
223		if (ggtt_start >= stolen[0].start && ggtt_start < stolen[0].end)
224			stolen[0].end = ggtt_start;
225		if (ggtt_end > stolen[1].start && ggtt_end <= stolen[1].end)
226			stolen[1].start = ggtt_end;
227
228		/* pick the larger of the two chunks */
229		if (stolen[0].end - stolen[0].start >
230		    stolen[1].end - stolen[1].start) {
231			base = stolen[0].start;
232			ggtt->stolen_size = stolen[0].end - stolen[0].start;
233		} else {
234			base = stolen[1].start;
235			ggtt->stolen_size = stolen[1].end - stolen[1].start;
236		}
237
238		if (stolen[0].start != stolen[1].start ||
239		    stolen[0].end != stolen[1].end) {
240			DRM_DEBUG_KMS("GTT within stolen memory at 0x%llx-0x%llx\n",
241				      (unsigned long long)ggtt_start,
242				      (unsigned long long)ggtt_end - 1);
243			DRM_DEBUG_KMS("Stolen memory adjusted to 0x%x-0x%x\n",
244				      base, base + (u32)ggtt->stolen_size - 1);
245		}
246	}
247
248
249	/* Verify that nothing else uses this physical address. Stolen
250	 * memory should be reserved by the BIOS and hidden from the
251	 * kernel. So if the region is already marked as busy, something
252	 * is seriously wrong.
253	 */
254	r = devm_request_mem_region(dev_priv->drm.dev, base, ggtt->stolen_size,
255				    "Graphics Stolen Memory");
256	if (r == NULL) {
257		/*
258		 * One more attempt but this time requesting region from
259		 * base + 1, as we have seen that this resolves the region
260		 * conflict with the PCI Bus.
261		 * This is a BIOS w/a: Some BIOS wrap stolen in the root
262		 * PCI bus, but have an off-by-one error. Hence retry the
263		 * reservation starting from 1 instead of 0.
264		 */
265		r = devm_request_mem_region(dev_priv->drm.dev, base + 1,
266					    ggtt->stolen_size - 1,
267					    "Graphics Stolen Memory");
268		/*
269		 * GEN3 firmware likes to smash pci bridges into the stolen
270		 * range. Apparently this works.
271		 */
272		if (r == NULL && !IS_GEN3(dev_priv)) {
273			DRM_ERROR("conflict detected with stolen region: [0x%08x - 0x%08x]\n",
274				  base, base + (uint32_t)ggtt->stolen_size);
275			base = 0;
276		}
277	}
278
279	return base;
280}
281
282void i915_gem_cleanup_stolen(struct drm_device *dev)
283{
284	struct drm_i915_private *dev_priv = to_i915(dev);
285
286	if (!drm_mm_initialized(&dev_priv->mm.stolen))
287		return;
288
289	drm_mm_takedown(&dev_priv->mm.stolen);
290}
291
292static void g4x_get_stolen_reserved(struct drm_i915_private *dev_priv,
293				    unsigned long *base, unsigned long *size)
294{
295	struct i915_ggtt *ggtt = &dev_priv->ggtt;
296	uint32_t reg_val = I915_READ(IS_GM45(dev_priv) ?
297				     CTG_STOLEN_RESERVED :
298				     ELK_STOLEN_RESERVED);
299	unsigned long stolen_top = dev_priv->mm.stolen_base +
300				   ggtt->stolen_size;
301
302	*base = (reg_val & G4X_STOLEN_RESERVED_ADDR2_MASK) << 16;
303
304	WARN_ON((reg_val & G4X_STOLEN_RESERVED_ADDR1_MASK) < *base);
305
306	/* On these platforms, the register doesn't have a size field, so the
307	 * size is the distance between the base and the top of the stolen
308	 * memory. We also have the genuine case where base is zero and there's
309	 * nothing reserved. */
310	if (*base == 0)
311		*size = 0;
312	else
313		*size = stolen_top - *base;
314}
315
316static void gen6_get_stolen_reserved(struct drm_i915_private *dev_priv,
317				     unsigned long *base, unsigned long *size)
318{
319	uint32_t reg_val = I915_READ(GEN6_STOLEN_RESERVED);
320
321	*base = reg_val & GEN6_STOLEN_RESERVED_ADDR_MASK;
322
323	switch (reg_val & GEN6_STOLEN_RESERVED_SIZE_MASK) {
324	case GEN6_STOLEN_RESERVED_1M:
325		*size = 1024 * 1024;
326		break;
327	case GEN6_STOLEN_RESERVED_512K:
328		*size = 512 * 1024;
329		break;
330	case GEN6_STOLEN_RESERVED_256K:
331		*size = 256 * 1024;
332		break;
333	case GEN6_STOLEN_RESERVED_128K:
334		*size = 128 * 1024;
335		break;
336	default:
337		*size = 1024 * 1024;
338		MISSING_CASE(reg_val & GEN6_STOLEN_RESERVED_SIZE_MASK);
339	}
340}
341
342static void gen7_get_stolen_reserved(struct drm_i915_private *dev_priv,
343				     unsigned long *base, unsigned long *size)
344{
345	uint32_t reg_val = I915_READ(GEN6_STOLEN_RESERVED);
346
347	*base = reg_val & GEN7_STOLEN_RESERVED_ADDR_MASK;
348
349	switch (reg_val & GEN7_STOLEN_RESERVED_SIZE_MASK) {
350	case GEN7_STOLEN_RESERVED_1M:
351		*size = 1024 * 1024;
352		break;
353	case GEN7_STOLEN_RESERVED_256K:
354		*size = 256 * 1024;
355		break;
356	default:
357		*size = 1024 * 1024;
358		MISSING_CASE(reg_val & GEN7_STOLEN_RESERVED_SIZE_MASK);
359	}
360}
361
362static void gen8_get_stolen_reserved(struct drm_i915_private *dev_priv,
363				     unsigned long *base, unsigned long *size)
364{
365	uint32_t reg_val = I915_READ(GEN6_STOLEN_RESERVED);
366
367	*base = reg_val & GEN6_STOLEN_RESERVED_ADDR_MASK;
368
369	switch (reg_val & GEN8_STOLEN_RESERVED_SIZE_MASK) {
370	case GEN8_STOLEN_RESERVED_1M:
371		*size = 1024 * 1024;
372		break;
373	case GEN8_STOLEN_RESERVED_2M:
374		*size = 2 * 1024 * 1024;
375		break;
376	case GEN8_STOLEN_RESERVED_4M:
377		*size = 4 * 1024 * 1024;
378		break;
379	case GEN8_STOLEN_RESERVED_8M:
380		*size = 8 * 1024 * 1024;
381		break;
382	default:
383		*size = 8 * 1024 * 1024;
384		MISSING_CASE(reg_val & GEN8_STOLEN_RESERVED_SIZE_MASK);
385	}
386}
387
388static void bdw_get_stolen_reserved(struct drm_i915_private *dev_priv,
389				    unsigned long *base, unsigned long *size)
390{
391	struct i915_ggtt *ggtt = &dev_priv->ggtt;
392	uint32_t reg_val = I915_READ(GEN6_STOLEN_RESERVED);
393	unsigned long stolen_top;
394
395	stolen_top = dev_priv->mm.stolen_base + ggtt->stolen_size;
396
397	*base = reg_val & GEN6_STOLEN_RESERVED_ADDR_MASK;
398
399	/* On these platforms, the register doesn't have a size field, so the
400	 * size is the distance between the base and the top of the stolen
401	 * memory. We also have the genuine case where base is zero and there's
402	 * nothing reserved. */
403	if (*base == 0)
404		*size = 0;
405	else
406		*size = stolen_top - *base;
407}
408
409int i915_gem_init_stolen(struct drm_i915_private *dev_priv)
410{
411	struct i915_ggtt *ggtt = &dev_priv->ggtt;
412	unsigned long reserved_total, reserved_base = 0, reserved_size;
413	unsigned long stolen_top;
414
415	mutex_init(&dev_priv->mm.stolen_lock);
416
417	if (intel_vgpu_active(dev_priv)) {
418		DRM_INFO("iGVT-g active, disabling use of stolen memory\n");
419		return 0;
420	}
421
422#ifdef CONFIG_INTEL_IOMMU
423	if (intel_iommu_gfx_mapped && INTEL_GEN(dev_priv) < 8) {
424		DRM_INFO("DMAR active, disabling use of stolen memory\n");
425		return 0;
426	}
427#endif
428
429	if (ggtt->stolen_size == 0)
430		return 0;
431
432	dev_priv->mm.stolen_base = i915_stolen_to_physical(dev_priv);
433	if (dev_priv->mm.stolen_base == 0)
434		return 0;
435
436	stolen_top = dev_priv->mm.stolen_base + ggtt->stolen_size;
437
438	switch (INTEL_INFO(dev_priv)->gen) {
439	case 2:
440	case 3:
441		break;
442	case 4:
443		if (IS_G4X(dev_priv))
444			g4x_get_stolen_reserved(dev_priv, &reserved_base,
445						&reserved_size);
446		break;
447	case 5:
448		/* Assume the gen6 maximum for the older platforms. */
449		reserved_size = 1024 * 1024;
450		reserved_base = stolen_top - reserved_size;
451		break;
452	case 6:
453		gen6_get_stolen_reserved(dev_priv, &reserved_base,
454					 &reserved_size);
455		break;
456	case 7:
457		gen7_get_stolen_reserved(dev_priv, &reserved_base,
458					 &reserved_size);
459		break;
460	default:
461		if (IS_BROADWELL(dev_priv) ||
462		    IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv))
463			bdw_get_stolen_reserved(dev_priv, &reserved_base,
464						&reserved_size);
465		else
466			gen8_get_stolen_reserved(dev_priv, &reserved_base,
467						 &reserved_size);
468		break;
469	}
470
471	/* It is possible for the reserved base to be zero, but the register
472	 * field for size doesn't have a zero option. */
473	if (reserved_base == 0) {
474		reserved_size = 0;
475		reserved_base = stolen_top;
476	}
477
478	if (reserved_base < dev_priv->mm.stolen_base ||
479	    reserved_base + reserved_size > stolen_top) {
480		DRM_DEBUG_KMS("Stolen reserved area [0x%08lx - 0x%08lx] outside stolen memory [0x%08lx - 0x%08lx]\n",
481			      reserved_base, reserved_base + reserved_size,
482			      dev_priv->mm.stolen_base, stolen_top);
483		return 0;
484	}
485
486	ggtt->stolen_reserved_base = reserved_base;
487	ggtt->stolen_reserved_size = reserved_size;
488
489	/* It is possible for the reserved area to end before the end of stolen
490	 * memory, so just consider the start. */
491	reserved_total = stolen_top - reserved_base;
492
493	DRM_DEBUG_KMS("Memory reserved for graphics device: %zuK, usable: %luK\n",
494		      ggtt->stolen_size >> 10,
495		      (ggtt->stolen_size - reserved_total) >> 10);
496
497	ggtt->stolen_usable_size = ggtt->stolen_size - reserved_total;
498
499	/*
500	 * Basic memrange allocator for stolen space.
501	 *
502	 * TODO: Notice that some platforms require us to not use the first page
503	 * of the stolen memory but their BIOSes may still put the framebuffer
504	 * on the first page. So we don't reserve this page for now because of
505	 * that. Our current solution is to just prevent new nodes from being
506	 * inserted on the first page - see the check we have at
507	 * i915_gem_stolen_insert_node_in_range(). We may want to fix the fbcon
508	 * problem later.
509	 */
510	drm_mm_init(&dev_priv->mm.stolen, 0, ggtt->stolen_usable_size);
511
512	return 0;
513}
514
515static struct sg_table *
516i915_pages_create_for_stolen(struct drm_device *dev,
517			     u32 offset, u32 size)
518{
519	struct drm_i915_private *dev_priv = to_i915(dev);
520	struct sg_table *st;
521	struct scatterlist *sg;
522
523	GEM_BUG_ON(offset > dev_priv->ggtt.stolen_size - size);
524
525	/* We hide that we have no struct page backing our stolen object
526	 * by wrapping the contiguous physical allocation with a fake
527	 * dma mapping in a single scatterlist.
528	 */
529
530	st = kmalloc(sizeof(*st), GFP_KERNEL);
531	if (st == NULL)
532		return ERR_PTR(-ENOMEM);
533
534	if (sg_alloc_table(st, 1, GFP_KERNEL)) {
535		kfree(st);
536		return ERR_PTR(-ENOMEM);
537	}
538
539	sg = st->sgl;
540	sg->offset = 0;
541	sg->length = size;
542
543	sg_dma_address(sg) = (dma_addr_t)dev_priv->mm.stolen_base + offset;
544	sg_dma_len(sg) = size;
545
546	return st;
547}
548
549static struct sg_table *
550i915_gem_object_get_pages_stolen(struct drm_i915_gem_object *obj)
551{
552	return i915_pages_create_for_stolen(obj->base.dev,
553					    obj->stolen->start,
554					    obj->stolen->size);
555}
556
557static void i915_gem_object_put_pages_stolen(struct drm_i915_gem_object *obj,
558					     struct sg_table *pages)
559{
560	/* Should only be called from i915_gem_object_release_stolen() */
561	sg_free_table(pages);
562	kfree(pages);
563}
564
565static void
566i915_gem_object_release_stolen(struct drm_i915_gem_object *obj)
567{
568	struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
569	struct drm_mm_node *stolen = fetch_and_zero(&obj->stolen);
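	/*
	 * fetch_and_zero() is an i915 helper that returns the old value
	 * and clears the pointer in one step (a plain read-and-clear, not
	 * an atomic op), so obj->stolen cannot be reused after release;
	 * the unpin below pairs with the pin taken at object creation.
	 */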
570
571	GEM_BUG_ON(!stolen);
572
573	__i915_gem_object_unpin_pages(obj);
574
575	i915_gem_stolen_remove_node(dev_priv, stolen);
576	kfree(stolen);
577}
578
579static const struct drm_i915_gem_object_ops i915_gem_object_stolen_ops = {
580	.get_pages = i915_gem_object_get_pages_stolen,
581	.put_pages = i915_gem_object_put_pages_stolen,
582	.release = i915_gem_object_release_stolen,
583};
584
585static struct drm_i915_gem_object *
586_i915_gem_object_create_stolen(struct drm_device *dev,
587			       struct drm_mm_node *stolen)
588{
589	struct drm_i915_gem_object *obj;
590
591	obj = i915_gem_object_alloc(dev);
592	if (obj == NULL)
593		return NULL;
594
595	drm_gem_private_object_init(dev, &obj->base, stolen->size);
596	i915_gem_object_init(obj, &i915_gem_object_stolen_ops);
597
598	obj->stolen = stolen;
599	obj->base.read_domains = I915_GEM_DOMAIN_CPU | I915_GEM_DOMAIN_GTT;
600	obj->cache_level = HAS_LLC(to_i915(dev)) ?
601			   I915_CACHE_LLC : I915_CACHE_NONE;
602
603	if (i915_gem_object_pin_pages(obj))
604		goto cleanup;
605
606	return obj;
607
608cleanup:
609	i915_gem_object_free(obj);
610	return NULL;
611}
612
613struct drm_i915_gem_object *
614i915_gem_object_create_stolen(struct drm_device *dev, u32 size)
615{
616	struct drm_i915_private *dev_priv = to_i915(dev);
617	struct drm_i915_gem_object *obj;
618	struct drm_mm_node *stolen;
619	int ret;
620
621	if (!drm_mm_initialized(&dev_priv->mm.stolen))
622		return NULL;
623
624	if (size == 0)
625		return NULL;
626
627	stolen = kzalloc(sizeof(*stolen), GFP_KERNEL);
628	if (!stolen)
629		return NULL;
630
631	ret = i915_gem_stolen_insert_node(dev_priv, stolen, size, 4096);
632	if (ret) {
633		kfree(stolen);
634		return NULL;
635	}
636
637	obj = _i915_gem_object_create_stolen(dev, stolen);
638	if (obj)
639		return obj;
640
641	i915_gem_stolen_remove_node(dev_priv, stolen);
642	kfree(stolen);
643	return NULL;
644}
645
646struct drm_i915_gem_object *
647i915_gem_object_create_stolen_for_preallocated(struct drm_device *dev,
648					       u32 stolen_offset,
649					       u32 gtt_offset,
650					       u32 size)
651{
652	struct drm_i915_private *dev_priv = to_i915(dev);
653	struct i915_ggtt *ggtt = &dev_priv->ggtt;
654	struct drm_i915_gem_object *obj;
655	struct drm_mm_node *stolen;
656	struct i915_vma *vma;
657	int ret;
658
659	if (!drm_mm_initialized(&dev_priv->mm.stolen))
660		return NULL;
661
662	lockdep_assert_held(&dev->struct_mutex);
663
664	DRM_DEBUG_KMS("creating preallocated stolen object: stolen_offset=%x, gtt_offset=%x, size=%x\n",
665			stolen_offset, gtt_offset, size);
666
667	/* KISS and expect everything to be page-aligned */
668	if (WARN_ON(size == 0) || WARN_ON(size & 4095) ||
669	    WARN_ON(stolen_offset & 4095))
670		return NULL;
671
672	stolen = kzalloc(sizeof(*stolen), GFP_KERNEL);
673	if (!stolen)
674		return NULL;
675
676	stolen->start = stolen_offset;
677	stolen->size = size;
678	mutex_lock(&dev_priv->mm.stolen_lock);
679	ret = drm_mm_reserve_node(&dev_priv->mm.stolen, stolen);
680	mutex_unlock(&dev_priv->mm.stolen_lock);
681	if (ret) {
682		DRM_DEBUG_KMS("failed to allocate stolen space\n");
683		kfree(stolen);
684		return NULL;
685	}
686
687	obj = _i915_gem_object_create_stolen(dev, stolen);
688	if (obj == NULL) {
689		DRM_DEBUG_KMS("failed to allocate stolen object\n");
690		i915_gem_stolen_remove_node(dev_priv, stolen);
691		kfree(stolen);
692		return NULL;
693	}
694
695	/* Some objects just need physical mem from stolen space */
696	if (gtt_offset == I915_GTT_OFFSET_NONE)
697		return obj;
698
699	ret = i915_gem_object_pin_pages(obj);
700	if (ret)
701		goto err;
702
703	vma = i915_gem_obj_lookup_or_create_vma(obj, &ggtt->base, NULL);
704	if (IS_ERR(vma)) {
705		ret = PTR_ERR(vma);
706		goto err_pages;
707	}
708
709	/* To simplify the initialisation sequence between KMS and GTT,
710	 * we allow construction of the stolen object prior to
711	 * setting up the GTT space. The actual reservation will occur
712	 * later.
713	 */
714	vma->node.start = gtt_offset;
715	vma->node.size = size;
716
717	ret = drm_mm_reserve_node(&ggtt->base.mm, &vma->node);
718	if (ret) {
719		DRM_DEBUG_KMS("failed to allocate stolen GTT space\n");
720		goto err_pages;
721	}
722
723	vma->pages = obj->mm.pages;
724	vma->flags |= I915_VMA_GLOBAL_BIND;
725	__i915_vma_set_map_and_fenceable(vma);
726	list_move_tail(&vma->vm_link, &ggtt->base.inactive_list);
727	list_move_tail(&obj->global_link, &dev_priv->mm.bound_list);
728	obj->bind_count++;
729
730	return obj;
731
732err_pages:
733	i915_gem_object_unpin_pages(obj);
734err:
735	i915_gem_object_put(obj);
736	return NULL;
737}