/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2014-2016 Intel Corporation
 */

#include "i915_drv.h"
#include "i915_gem_object.h"
#include "i915_scatterlist.h"
#include "i915_gem_lmem.h"
#include "i915_gem_mman.h"

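/*
 * Install the freshly gathered backing store for the object: flush any
 * dirty CPU cachelines so the pages are coherent with the GPU, record
 * which GTT page sizes the sg_table can support and, for shrinkable
 * objects, add the object to the shrinker lists.
 */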
void __i915_gem_object_set_pages(struct drm_i915_gem_object *obj,
				 struct sg_table *pages,
				 unsigned int sg_page_sizes)
{
	struct drm_i915_private *i915 = to_i915(obj->base.dev);
	unsigned long supported = INTEL_INFO(i915)->page_sizes;
	int i;

	lockdep_assert_held(&obj->mm.lock);

	if (i915_gem_object_is_volatile(obj))
		obj->mm.madv = I915_MADV_DONTNEED;

	/* Make the pages coherent with the GPU (flushing any swapin). */
	if (obj->cache_dirty) {
		obj->write_domain = 0;
		if (i915_gem_object_has_struct_page(obj))
			drm_clflush_sg(pages);
		obj->cache_dirty = false;
	}

	obj->mm.get_page.sg_pos = pages->sgl;
	obj->mm.get_page.sg_idx = 0;

	obj->mm.pages = pages;

	if (i915_gem_object_is_tiled(obj) &&
	    i915->quirks & QUIRK_PIN_SWIZZLED_PAGES) {
		GEM_BUG_ON(obj->mm.quirked);
		__i915_gem_object_pin_pages(obj);
		obj->mm.quirked = true;
	}

	GEM_BUG_ON(!sg_page_sizes);
	obj->mm.page_sizes.phys = sg_page_sizes;

	/*
	 * Calculate the supported page-sizes which fit into the given
	 * sg_page_sizes. This will give us the page-sizes which we may be able
	 * to use opportunistically when later inserting into the GTT. For
	 * example if phys=2G, then in theory we should be able to use 1G, 2M,
	 * 64K or 4K pages, although in practice this will depend on a number of
	 * other factors.
	 */
	obj->mm.page_sizes.sg = 0;
	for_each_set_bit(i, &supported, ilog2(I915_GTT_MAX_PAGE_SIZE) + 1) {
		if (obj->mm.page_sizes.phys & ~0u << i)
			obj->mm.page_sizes.sg |= BIT(i);
	}
	GEM_BUG_ON(!HAS_PAGE_SIZES(i915, obj->mm.page_sizes.sg));

	if (i915_gem_object_is_shrinkable(obj)) {
		struct list_head *list;
		unsigned long flags;

		spin_lock_irqsave(&i915->mm.obj_lock, flags);

		i915->mm.shrink_count++;
		i915->mm.shrink_memory += obj->base.size;

		if (obj->mm.madv != I915_MADV_WILLNEED)
			list = &i915->mm.purge_list;
		else
			list = &i915->mm.shrink_list;
		list_add_tail(&obj->mm.link, list);

		atomic_set(&obj->mm.shrink_pin, 0);
		spin_unlock_irqrestore(&i915->mm.obj_lock, flags);
	}
}

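/*
 * Fetch the backing pages from the object's backend via obj->ops->get_pages().
 * Callers in this file take obj->mm.lock around this and only call it when no
 * pages are currently attached.
 */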
int ____i915_gem_object_get_pages(struct drm_i915_gem_object *obj)
{
	struct drm_i915_private *i915 = to_i915(obj->base.dev);
	int err;

	if (unlikely(obj->mm.madv != I915_MADV_WILLNEED)) {
		drm_dbg(&i915->drm,
			"Attempting to obtain a purgeable object\n");
		return -EFAULT;
	}

	err = obj->ops->get_pages(obj);
	GEM_BUG_ON(!err && !i915_gem_object_has_pages(obj));

	return err;
}

/* Ensure that the associated pages are gathered from the backing storage
 * and pinned into our object. i915_gem_object_pin_pages() may be called
 * multiple times before they are released by a single call to
 * i915_gem_object_unpin_pages() - once the pages are no longer referenced
 * either as a result of memory pressure (reaping pages under the shrinker)
 * or as the object is itself released.
 */
int __i915_gem_object_get_pages(struct drm_i915_gem_object *obj)
{
	int err;

	err = mutex_lock_interruptible_nested(&obj->mm.lock, I915_MM_GET_PAGES);
	if (err)
		return err;

	if (unlikely(!i915_gem_object_has_pages(obj))) {
		GEM_BUG_ON(i915_gem_object_has_pinned_pages(obj));

		err = ____i915_gem_object_get_pages(obj);
		if (err)
			goto unlock;

		smp_mb__before_atomic();
	}
	atomic_inc(&obj->mm.pages_pin_count);

unlock:
	mutex_unlock(&obj->mm.lock);
	return err;
}

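/*
 * Callers normally pair i915_gem_object_pin_pages() with
 * i915_gem_object_unpin_pages() rather than using the helper above
 * directly. An illustrative sketch only, not code from this file:
 *
 *	err = i915_gem_object_pin_pages(obj);
 *	if (err)
 *		return err;
 *	... access obj->mm.pages / obj->mm.page_sizes ...
 *	i915_gem_object_unpin_pages(obj);
 */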
/* Immediately discard the backing storage */
void i915_gem_object_truncate(struct drm_i915_gem_object *obj)
{
	drm_gem_free_mmap_offset(&obj->base);
	if (obj->ops->truncate)
		obj->ops->truncate(obj);
}

/* Try to discard unwanted pages */
void i915_gem_object_writeback(struct drm_i915_gem_object *obj)
{
	lockdep_assert_held(&obj->mm.lock);
	GEM_BUG_ON(i915_gem_object_has_pages(obj));

	if (obj->ops->writeback)
		obj->ops->writeback(obj);
}

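/*
 * Drop every entry cached in the radixtree used by i915_gem_object_get_sg(),
 * as the scatterlist it indexes is about to be released.
 */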
static void __i915_gem_object_reset_page_iter(struct drm_i915_gem_object *obj)
{
	struct radix_tree_iter iter;
	void __rcu **slot;

	rcu_read_lock();
	radix_tree_for_each_slot(slot, &obj->mm.get_page.radix, &iter, 0)
		radix_tree_delete(&obj->mm.get_page.radix, iter.index);
	rcu_read_unlock();
}

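/* Undo i915_gem_object_map(): either a vmap of many pages or a kmap of one. */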
static void unmap_object(struct drm_i915_gem_object *obj, void *ptr)
{
	if (is_vmalloc_addr(ptr))
		vunmap(ptr);
	else
		kunmap(kmap_to_page(ptr));
}

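/*
 * Detach the sg_table from the object, tearing down any kernel mapping and
 * cached sg lookups, and return it so that the caller can hand the pages
 * back to the backend.
 */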
struct sg_table *
__i915_gem_object_unset_pages(struct drm_i915_gem_object *obj)
{
	struct sg_table *pages;

	pages = fetch_and_zero(&obj->mm.pages);
	if (IS_ERR_OR_NULL(pages))
		return pages;

	if (i915_gem_object_is_volatile(obj))
		obj->mm.madv = I915_MADV_WILLNEED;

	i915_gem_object_make_unshrinkable(obj);

	if (obj->mm.mapping) {
		unmap_object(obj, page_mask_bits(obj->mm.mapping));
		obj->mm.mapping = NULL;
	}

	__i915_gem_object_reset_page_iter(obj);
	obj->mm.page_sizes.phys = obj->mm.page_sizes.sg = 0;

	return pages;
}

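/*
 * Release the backing pages to the backend unless they are still pinned;
 * returns -EBUSY if the pin count is non-zero.
 */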
int __i915_gem_object_put_pages(struct drm_i915_gem_object *obj)
{
	struct sg_table *pages;
	int err;

	if (i915_gem_object_has_pinned_pages(obj))
		return -EBUSY;

	/* May be called by shrinker from within get_pages() (on another bo) */
	mutex_lock(&obj->mm.lock);
	if (unlikely(atomic_read(&obj->mm.pages_pin_count))) {
		err = -EBUSY;
		goto unlock;
	}

	i915_gem_object_release_mmap_offset(obj);

	/*
	 * ->put_pages might need to allocate memory for the bit17 swizzle
	 * array, hence protect them from being reaped by removing them from gtt
	 * lists early.
	 */
	pages = __i915_gem_object_unset_pages(obj);

	/*
	 * XXX Temporary hijinx to avoid updating all backends to handle
	 * NULL pages. In the future, when we have more asynchronous
	 * get_pages backends we should be better able to handle the
	 * cancellation of the async task in a more uniform manner.
	 */
	if (!pages && !i915_gem_object_needs_async_cancel(obj))
		pages = ERR_PTR(-EINVAL);

	if (!IS_ERR(pages))
		obj->ops->put_pages(obj, pages);

	err = 0;
unlock:
	mutex_unlock(&obj->mm.lock);

	return err;
}

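/* Build a special (no struct page) PTE for a page of I/O memory. */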
static inline pte_t iomap_pte(resource_size_t base,
			      dma_addr_t offset,
			      pgprot_t prot)
{
	return pte_mkspecial(pfn_pte((base + offset) >> PAGE_SHIFT, prot));
}

/* The 'mapping' part of i915_gem_object_pin_map() below */
static void *i915_gem_object_map(struct drm_i915_gem_object *obj,
				 enum i915_map_type type)
{
	unsigned long n_pte = obj->base.size >> PAGE_SHIFT;
	struct sg_table *sgt = obj->mm.pages;
	pte_t *stack[32], **mem;
	struct vm_struct *area;
	pgprot_t pgprot;

	if (!i915_gem_object_has_struct_page(obj) && type != I915_MAP_WC)
		return NULL;

	/* A single page can always be kmapped */
	if (n_pte == 1 && type == I915_MAP_WB)
		return kmap(sg_page(sgt->sgl));

	mem = stack;
	if (n_pte > ARRAY_SIZE(stack)) {
		/* Too big for stack -- allocate temporary array instead */
		mem = kvmalloc_array(n_pte, sizeof(*mem), GFP_KERNEL);
		if (!mem)
			return NULL;
	}

	area = alloc_vm_area(obj->base.size, mem);
	if (!area) {
		if (mem != stack)
			kvfree(mem);
		return NULL;
	}

	switch (type) {
	default:
		MISSING_CASE(type);
		fallthrough;	/* to use PAGE_KERNEL anyway */
	case I915_MAP_WB:
		pgprot = PAGE_KERNEL;
		break;
	case I915_MAP_WC:
		pgprot = pgprot_writecombine(PAGE_KERNEL_IO);
		break;
	}

	if (i915_gem_object_has_struct_page(obj)) {
		struct sgt_iter iter;
		struct page *page;
		pte_t **ptes = mem;

		for_each_sgt_page(page, iter, sgt)
			**ptes++ = mk_pte(page, pgprot);
	} else {
		resource_size_t iomap;
		struct sgt_iter iter;
		pte_t **ptes = mem;
		dma_addr_t addr;

		iomap = obj->mm.region->iomap.base;
		iomap -= obj->mm.region->region.start;

		for_each_sgt_daddr(addr, iter, sgt)
			**ptes++ = iomap_pte(iomap, addr, pgprot);
	}

	if (mem != stack)
		kvfree(mem);

	return area->addr;
}

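/*
 * Illustrative usage of the map API (a sketch, not code from this file):
 *
 *	void *vaddr = i915_gem_object_pin_map(obj, I915_MAP_WB);
 *	if (IS_ERR(vaddr))
 *		return PTR_ERR(vaddr);
 *	... read/write through vaddr ...
 *	__i915_gem_object_flush_map(obj, 0, obj->base.size);
 *	i915_gem_object_unpin_map(obj);
 */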
/* get, pin, and map the pages of the object into kernel space */
void *i915_gem_object_pin_map(struct drm_i915_gem_object *obj,
			      enum i915_map_type type)
{
	enum i915_map_type has_type;
	unsigned int flags;
	bool pinned;
	void *ptr;
	int err;

	flags = I915_GEM_OBJECT_HAS_STRUCT_PAGE | I915_GEM_OBJECT_HAS_IOMEM;
	if (!i915_gem_object_type_has(obj, flags))
		return ERR_PTR(-ENXIO);

	err = mutex_lock_interruptible_nested(&obj->mm.lock, I915_MM_GET_PAGES);
	if (err)
		return ERR_PTR(err);

	pinned = !(type & I915_MAP_OVERRIDE);
	type &= ~I915_MAP_OVERRIDE;

	if (!atomic_inc_not_zero(&obj->mm.pages_pin_count)) {
		if (unlikely(!i915_gem_object_has_pages(obj))) {
			GEM_BUG_ON(i915_gem_object_has_pinned_pages(obj));

			err = ____i915_gem_object_get_pages(obj);
			if (err)
				goto err_unlock;

			smp_mb__before_atomic();
		}
		atomic_inc(&obj->mm.pages_pin_count);
		pinned = false;
	}
	GEM_BUG_ON(!i915_gem_object_has_pages(obj));

	ptr = page_unpack_bits(obj->mm.mapping, &has_type);
	if (ptr && has_type != type) {
		if (pinned) {
			err = -EBUSY;
			goto err_unpin;
		}

		unmap_object(obj, ptr);

		ptr = obj->mm.mapping = NULL;
	}

	if (!ptr) {
		ptr = i915_gem_object_map(obj, type);
		if (!ptr) {
			err = -ENOMEM;
			goto err_unpin;
		}

		obj->mm.mapping = page_pack_bits(ptr, type);
	}

out_unlock:
	mutex_unlock(&obj->mm.lock);
	return ptr;

err_unpin:
	atomic_dec(&obj->mm.pages_pin_count);
err_unlock:
	ptr = ERR_PTR(err);
	goto out_unlock;
}

void __i915_gem_object_flush_map(struct drm_i915_gem_object *obj,
				 unsigned long offset,
				 unsigned long size)
{
	enum i915_map_type has_type;
	void *ptr;

	GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));
	GEM_BUG_ON(range_overflows_t(typeof(obj->base.size),
				     offset, size, obj->base.size));

	wmb(); /* let all previous writes be visible to coherent partners */
	obj->mm.dirty = true;

	if (obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_WRITE)
		return;

	ptr = page_unpack_bits(obj->mm.mapping, &has_type);
	if (has_type == I915_MAP_WC)
		return;

	drm_clflush_virt_range(ptr + offset, size);
	if (size == obj->base.size) {
		obj->write_domain &= ~I915_GEM_DOMAIN_CPU;
		obj->cache_dirty = false;
	}
}

void __i915_gem_object_release_map(struct drm_i915_gem_object *obj)
{
	GEM_BUG_ON(!obj->mm.mapping);

	/*
	 * We allow removing the mapping from underneath pinned pages!
	 *
	 * Furthermore, since this is an unsafe operation reserved only
	 * for construction time manipulation, we ignore locking prudence.
	 */
	unmap_object(obj, page_mask_bits(fetch_and_zero(&obj->mm.mapping)));

	i915_gem_object_unpin_map(obj);
}

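/*
 * Return the scatterlist entry containing page index @n of the object, and
 * store in @offset the page offset of @n within that entry. Lookups are
 * cached in a radixtree so that repeated and backwards queries stay cheap.
 */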
struct scatterlist *
i915_gem_object_get_sg(struct drm_i915_gem_object *obj,
		       unsigned int n,
		       unsigned int *offset)
{
	struct i915_gem_object_page_iter *iter = &obj->mm.get_page;
	struct scatterlist *sg;
	unsigned int idx, count;

	might_sleep();
	GEM_BUG_ON(n >= obj->base.size >> PAGE_SHIFT);
	GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));

	/* As we iterate forward through the sg, we record each entry in a
	 * radixtree for quick repeated (backwards) lookups. If we have seen
	 * this index previously, we will have an entry for it.
	 *
	 * Initial lookup is O(N), but this is amortized to O(1) for
	 * sequential page access (where each new request is consecutive
	 * to the previous one). Repeated lookups are O(lg(obj->base.size)),
	 * i.e. O(1) with a large constant!
	 */
	if (n < READ_ONCE(iter->sg_idx))
		goto lookup;

	mutex_lock(&iter->lock);

	/* We prefer to reuse the last sg so that repeated lookup of this
	 * (or the subsequent) sg are fast - comparing against the last
	 * sg is faster than going through the radixtree.
	 */

	sg = iter->sg_pos;
	idx = iter->sg_idx;
	count = __sg_page_count(sg);

	while (idx + count <= n) {
		void *entry;
		unsigned long i;
		int ret;

		/* If we cannot allocate and insert this entry, or the
		 * individual pages from this range, cancel updating the
		 * sg_idx so that on this lookup we are forced to linearly
		 * scan onwards, but on future lookups we will try the
		 * insertion again (in which case we need to be careful of
		 * the error return reporting that we have already inserted
		 * this index).
		 */
		ret = radix_tree_insert(&iter->radix, idx, sg);
		if (ret && ret != -EEXIST)
			goto scan;

		entry = xa_mk_value(idx);
		for (i = 1; i < count; i++) {
			ret = radix_tree_insert(&iter->radix, idx + i, entry);
			if (ret && ret != -EEXIST)
				goto scan;
		}

		idx += count;
		sg = ____sg_next(sg);
		count = __sg_page_count(sg);
	}

scan:
	iter->sg_pos = sg;
	iter->sg_idx = idx;

	mutex_unlock(&iter->lock);

	if (unlikely(n < idx)) /* insertion completed by another thread */
		goto lookup;

	/* In case we failed to insert the entry into the radixtree, we need
	 * to look beyond the current sg.
	 */
	while (idx + count <= n) {
		idx += count;
		sg = ____sg_next(sg);
		count = __sg_page_count(sg);
	}

	*offset = n - idx;
	return sg;

lookup:
	rcu_read_lock();

	sg = radix_tree_lookup(&iter->radix, n);
	GEM_BUG_ON(!sg);

	/* If this index is in the middle of multi-page sg entry,
	 * the radix tree will contain a value entry that points
	 * to the start of that range. We will return the pointer to
	 * the base page and the offset of this page within the
	 * sg entry's range.
	 */
	*offset = 0;
	if (unlikely(xa_is_value(sg))) {
		unsigned long base = xa_to_value(sg);

		sg = radix_tree_lookup(&iter->radix, base);
		GEM_BUG_ON(!sg);

		*offset = n - base;
	}

	rcu_read_unlock();

	return sg;
}

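/* Return the struct page backing page index @n; the object must be backed
 * by struct pages and its pages must already be pinned.
 */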
struct page *
i915_gem_object_get_page(struct drm_i915_gem_object *obj, unsigned int n)
{
	struct scatterlist *sg;
	unsigned int offset;

	GEM_BUG_ON(!i915_gem_object_has_struct_page(obj));

	sg = i915_gem_object_get_sg(obj, n, &offset);
	return nth_page(sg_page(sg), offset);
}

/* Like i915_gem_object_get_page(), but mark the returned page dirty */
struct page *
i915_gem_object_get_dirty_page(struct drm_i915_gem_object *obj,
			       unsigned int n)
{
	struct page *page;

	page = i915_gem_object_get_page(obj, n);
	if (!obj->mm.dirty)
		set_page_dirty(page);

	return page;
}

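/* Return the DMA address of page index @n and, optionally via @len, how many
 * bytes of the containing sg entry remain from that page onwards.
 */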
dma_addr_t
i915_gem_object_get_dma_address_len(struct drm_i915_gem_object *obj,
				    unsigned long n,
				    unsigned int *len)
{
	struct scatterlist *sg;
	unsigned int offset;

	sg = i915_gem_object_get_sg(obj, n, &offset);

	if (len)
		*len = sg_dma_len(sg) - (offset << PAGE_SHIFT);

	return sg_dma_address(sg) + (offset << PAGE_SHIFT);
}

dma_addr_t
i915_gem_object_get_dma_address(struct drm_i915_gem_object *obj,
				unsigned long n)
{
	return i915_gem_object_get_dma_address_len(obj, n, NULL);
}