// SPDX-License-Identifier: MIT
/*
 * Copyright © 2019 Intel Corporation
 */

#include "intel_memory_region.h"
#include "intel_region_ttm.h"
#include "gem/i915_gem_region.h"
#include "gem/i915_gem_lmem.h"
#include "i915_drv.h"

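/*
 * Release an LMEM object's backing store: hand the node back to the
 * region's TTM allocator and free the scatterlist describing it.
 */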
static void lmem_put_pages(struct drm_i915_gem_object *obj,
			   struct sg_table *pages)
{
	intel_region_ttm_node_free(obj->mm.region, obj->mm.st_mm_node);
	obj->mm.dirty = false;
	sg_free_table(pages);
	kfree(pages);
}

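/*
 * Allocate backing store for an LMEM object from its memory region and
 * publish the resulting scatterlist to the GEM layer. For objects created
 * with I915_BO_ALLOC_CPU_CLEAR, the fresh memory is zeroed through a
 * write-combined iomapping before being exposed.
 */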
static int lmem_get_pages(struct drm_i915_gem_object *obj)
{
	unsigned int flags;
	struct sg_table *pages;

	flags = I915_ALLOC_MIN_PAGE_SIZE;
	if (obj->flags & I915_BO_ALLOC_CONTIGUOUS)
		flags |= I915_ALLOC_CONTIGUOUS;

	obj->mm.st_mm_node = intel_region_ttm_node_alloc(obj->mm.region,
							 obj->base.size,
							 flags);
	if (IS_ERR(obj->mm.st_mm_node))
		return PTR_ERR(obj->mm.st_mm_node);

	/* The range manager is always contiguous */
	if (obj->mm.region->is_range_manager)
		obj->flags |= I915_BO_ALLOC_CONTIGUOUS;

	pages = intel_region_ttm_node_to_st(obj->mm.region, obj->mm.st_mm_node);
	if (IS_ERR(pages)) {
		intel_region_ttm_node_free(obj->mm.region, obj->mm.st_mm_node);
		return PTR_ERR(pages);
	}

	__i915_gem_object_set_pages(obj, pages, i915_sg_dma_sizes(pages->sgl));

	if (obj->flags & I915_BO_ALLOC_CPU_CLEAR) {
		void __iomem *vaddr =
			i915_gem_object_lmem_io_map(obj, 0, obj->base.size);

		if (!vaddr) {
			struct sg_table *pages =
				__i915_gem_object_unset_pages(obj);

			if (!IS_ERR_OR_NULL(pages))
				lmem_put_pages(obj, pages);

			/* Clearing through a NULL mapping would oops. */
			return -ENOMEM;
		}

		memset_io(vaddr, 0, obj->base.size);
		io_mapping_unmap(vaddr);
	}

	return 0;
}

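/*
 * Object ops for LMEM-backed objects. I915_GEM_OBJECT_HAS_IOMEM flags the
 * backing store as I/O memory rather than ordinary system pages.
 */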
const struct drm_i915_gem_object_ops i915_gem_lmem_obj_ops = {
	.name = "i915_gem_object_lmem",
	.flags = I915_GEM_OBJECT_HAS_IOMEM,

	.get_pages = lmem_get_pages,
	.put_pages = lmem_put_pages,
	.release = i915_gem_object_release_memory_region,
};

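/*
 * Map @size bytes of a contiguous LMEM object, starting at page @n, into
 * CPU address space with write-combining. The mapping offset is the page's
 * DMA address relative to the start of the backing region.
 */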
void __iomem *
i915_gem_object_lmem_io_map(struct drm_i915_gem_object *obj,
			    unsigned long n,
			    unsigned long size)
{
	resource_size_t offset;

	GEM_BUG_ON(!i915_gem_object_is_contiguous(obj));

	offset = i915_gem_object_get_dma_address(obj, n);
	offset -= obj->mm.region->region.start;

	return io_mapping_map_wc(&obj->mm.region->iomap, offset, size);
}

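/*
 * An object is in LMEM if its backing region is device-local memory,
 * including the stolen part of local memory.
 */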
bool i915_gem_object_is_lmem(struct drm_i915_gem_object *obj)
{
	struct intel_memory_region *mr = obj->mm.region;

	return mr && (mr->type == INTEL_MEMORY_LOCAL ||
		      mr->type == INTEL_MEMORY_STOLEN_LOCAL);
}

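/* Create a GEM object backed by the device's LMEM region. */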
struct drm_i915_gem_object *
i915_gem_object_create_lmem(struct drm_i915_private *i915,
			    resource_size_t size,
			    unsigned int flags)
{
	return i915_gem_object_create_region(i915->mm.regions[INTEL_REGION_LMEM],
					     size, flags);
}

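/*
 * Common init for LMEM-backed objects: wire up the LMEM object ops, mark
 * the object uncached on the CPU side and attach it to its memory region.
 */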
int __i915_gem_lmem_object_init(struct intel_memory_region *mem,
				struct drm_i915_gem_object *obj,
				resource_size_t size,
				unsigned int flags)
{
	static struct lock_class_key lock_class;
	struct drm_i915_private *i915 = mem->i915;

	drm_gem_private_object_init(&i915->drm, &obj->base, size);
	i915_gem_object_init(obj, &i915_gem_lmem_obj_ops, &lock_class, flags);

	obj->read_domains = I915_GEM_DOMAIN_WC | I915_GEM_DOMAIN_GTT;

	i915_gem_object_set_cache_coherency(obj, I915_CACHE_NONE);

	i915_gem_object_init_memory_region(obj, mem);

	return 0;
}