// SPDX-License-Identifier: MIT
/*
 * Copyright © 2019 Intel Corporation
 */

#include "i915_drv.h"
#include "intel_memory_region.h"
#include "intel_region_lmem.h"
#include "intel_region_ttm.h"
#include "gem/i915_gem_lmem.h"
#include "gem/i915_gem_region.h"

14static int init_fake_lmem_bar(struct intel_memory_region *mem)
15{
16 struct drm_i915_private *i915 = mem->i915;
17 struct i915_ggtt *ggtt = &i915->ggtt;
18 unsigned long n;
19 int ret;
20
21 /* We want to 1:1 map the mappable aperture to our reserved region */
22
23 mem->fake_mappable.start = 0;
24 mem->fake_mappable.size = resource_size(&mem->region);
25 mem->fake_mappable.color = I915_COLOR_UNEVICTABLE;
26
27 ret = drm_mm_reserve_node(&ggtt->vm.mm, &mem->fake_mappable);
28 if (ret)
29 return ret;
30
31 mem->remap_addr = dma_map_resource(i915->drm.dev,
32 mem->region.start,
33 mem->fake_mappable.size,
34 PCI_DMA_BIDIRECTIONAL,
35 DMA_ATTR_FORCE_CONTIGUOUS);
36 if (dma_mapping_error(i915->drm.dev, mem->remap_addr)) {
37 drm_mm_remove_node(&mem->fake_mappable);
38 return -EINVAL;
39 }
40
41 for (n = 0; n < mem->fake_mappable.size >> PAGE_SHIFT; ++n) {
42 ggtt->vm.insert_page(&ggtt->vm,
43 mem->remap_addr + (n << PAGE_SHIFT),
44 n << PAGE_SHIFT,
45 I915_CACHE_NONE, 0);
46 }
47
48 mem->region = (struct resource)DEFINE_RES_MEM(mem->remap_addr,
49 mem->fake_mappable.size);
50
51 return 0;
52}
53
54static void release_fake_lmem_bar(struct intel_memory_region *mem)
55{
56 if (!drm_mm_node_allocated(&mem->fake_mappable))
57 return;
58
59 drm_mm_remove_node(&mem->fake_mappable);
60
61 dma_unmap_resource(mem->i915->drm.dev,
62 mem->remap_addr,
63 mem->fake_mappable.size,
64 PCI_DMA_BIDIRECTIONAL,
65 DMA_ATTR_FORCE_CONTIGUOUS);
66}
67
68static void
69region_lmem_release(struct intel_memory_region *mem)
70{
71 intel_region_ttm_fini(mem);
72 io_mapping_fini(&mem->iomap);
73 release_fake_lmem_bar(mem);
74}
75
76static int
77region_lmem_init(struct intel_memory_region *mem)
78{
79 int ret;
80
81 if (mem->i915->params.fake_lmem_start) {
82 ret = init_fake_lmem_bar(mem);
83 GEM_BUG_ON(ret);
84 }
85
86 if (!io_mapping_init_wc(&mem->iomap,
87 mem->io_start,
88 resource_size(&mem->region))) {
89 ret = -EIO;
90 goto out_no_io;
91 }
92
93 ret = intel_region_ttm_init(mem);
94 if (ret)
95 goto out_no_buddy;
96
97 return 0;
98
99out_no_buddy:
100 io_mapping_fini(&mem->iomap);
101out_no_io:
102 release_fake_lmem_bar(mem);
103
104 return ret;
105}
106
107static const struct intel_memory_region_ops intel_region_lmem_ops = {
108 .init = region_lmem_init,
109 .release = region_lmem_release,
110 .init_object = __i915_gem_lmem_object_init,
111};
112
113struct intel_memory_region *
114intel_gt_setup_fake_lmem(struct intel_gt *gt)
115{
116 struct drm_i915_private *i915 = gt->i915;
117 struct pci_dev *pdev = to_pci_dev(i915->drm.dev);
118 struct intel_memory_region *mem;
119 resource_size_t mappable_end;
120 resource_size_t io_start;
121 resource_size_t start;
122
123 if (!HAS_LMEM(i915))
124 return ERR_PTR(-ENODEV);
125
126 if (!i915->params.fake_lmem_start)
127 return ERR_PTR(-ENODEV);
128
129 GEM_BUG_ON(i915_ggtt_has_aperture(&i915->ggtt));
130
131 /* Your mappable aperture belongs to me now! */
132 mappable_end = pci_resource_len(pdev, 2);
133 io_start = pci_resource_start(pdev, 2);
134 start = i915->params.fake_lmem_start;
135
136 mem = intel_memory_region_create(i915,
137 start,
138 mappable_end,
139 PAGE_SIZE,
140 io_start,
141 INTEL_MEMORY_LOCAL,
142 0,
143 &intel_region_lmem_ops);
144 if (!IS_ERR(mem)) {
145 drm_info(&i915->drm, "Intel graphics fake LMEM: %pR\n",
146 &mem->region);
147 drm_info(&i915->drm,
148 "Intel graphics fake LMEM IO start: %llx\n",
149 (u64)mem->io_start);
150 drm_info(&i915->drm, "Intel graphics fake LMEM size: %llx\n",
151 (u64)resource_size(&mem->region));
152 }
153
154 return mem;
155}
156
157static bool get_legacy_lowmem_region(struct intel_uncore *uncore,
158 u64 *start, u32 *size)
159{
160 if (!IS_DG1_REVID(uncore->i915, DG1_REVID_A0, DG1_REVID_B0))
161 return false;
162
163 *start = 0;
164 *size = SZ_1M;
165
166 drm_dbg(&uncore->i915->drm, "LMEM: reserved legacy low-memory [0x%llx-0x%llx]\n",
167 *start, *start + *size);
168
169 return true;
170}
171
172static int reserve_lowmem_region(struct intel_uncore *uncore,
173 struct intel_memory_region *mem)
174{
175 u64 reserve_start;
176 u32 reserve_size;
177 int ret;
178
179 if (!get_legacy_lowmem_region(uncore, &reserve_start, &reserve_size))
180 return 0;
181
182 ret = intel_memory_region_reserve(mem, reserve_start, reserve_size);
183 if (ret)
184 drm_err(&uncore->i915->drm, "LMEM: reserving low memory region failed\n");
185
186 return ret;
187}
188
189static struct intel_memory_region *setup_lmem(struct intel_gt *gt)
190{
191 struct drm_i915_private *i915 = gt->i915;
192 struct intel_uncore *uncore = gt->uncore;
193 struct pci_dev *pdev = to_pci_dev(i915->drm.dev);
194 struct intel_memory_region *mem;
195 resource_size_t io_start;
196 resource_size_t lmem_size;
197 int err;
198
199 if (!IS_DGFX(i915))
200 return ERR_PTR(-ENODEV);
201
202 /* Stolen starts from GSMBASE on DG1 */
203 lmem_size = intel_uncore_read64(uncore, GEN12_GSMBASE);
204
205 io_start = pci_resource_start(pdev, 2);
206 if (GEM_WARN_ON(lmem_size > pci_resource_len(pdev, 2)))
207 return ERR_PTR(-ENODEV);
208
209 mem = intel_memory_region_create(i915,
210 0,
211 lmem_size,
212 I915_GTT_PAGE_SIZE_4K,
213 io_start,
214 INTEL_MEMORY_LOCAL,
215 0,
216 &intel_region_lmem_ops);
217 if (IS_ERR(mem))
218 return mem;
219
220 err = reserve_lowmem_region(uncore, mem);
221 if (err)
222 goto err_region_put;
223
224 drm_dbg(&i915->drm, "Local memory: %pR\n", &mem->region);
225 drm_dbg(&i915->drm, "Local memory IO start: %pa\n",
226 &mem->io_start);
227 drm_info(&i915->drm, "Local memory available: %pa\n",
228 &lmem_size);
229
230 return mem;
231
232err_region_put:
233 intel_memory_region_put(mem);
234 return ERR_PTR(err);
235}
236
237struct intel_memory_region *intel_gt_setup_lmem(struct intel_gt *gt)
238{
239 return setup_lmem(gt);
240}