// SPDX-License-Identifier: MIT
/*
 * Copyright © 2024 Intel Corporation
 */

#include <linux/scatterlist.h>
#include <linux/mmu_notifier.h>
#include <linux/dma-mapping.h>
#include <linux/memremap.h>
#include <linux/swap.h>
#include <linux/hmm.h>
#include <linux/mm.h>
#include "xe_hmm.h"
#include "xe_vm.h"
#include "xe_bo.h"

static u64 xe_npages_in_range(unsigned long start, unsigned long end)
{
	return (end - start) >> PAGE_SHIFT;
}

/**
 * xe_mark_range_accessed() - mark a range as accessed, so the core mm
 * has this information for memory eviction or writeback to disk
 * @range: the range to mark
 * @write: if the range was written to, also mark its pages dirty
 */
static void xe_mark_range_accessed(struct hmm_range *range, bool write)
{
	struct page *page;
	u64 i, npages;

	npages = xe_npages_in_range(range->start, range->end);
	for (i = 0; i < npages; i++) {
		page = hmm_pfn_to_page(range->hmm_pfns[i]);
		if (write)
			set_page_dirty_lock(page);

		mark_page_accessed(page);
	}
}

static int xe_alloc_sg(struct xe_device *xe, struct sg_table *st,
		       struct hmm_range *range, struct rw_semaphore *notifier_sem)
{
	unsigned long i, npages, hmm_pfn;
	unsigned long num_chunks = 0;
	int ret;

	/* HMM docs say this is needed. */
	ret = down_read_interruptible(notifier_sem);
	if (ret)
		return ret;

	if (mmu_interval_read_retry(range->notifier, range->notifier_seq)) {
		up_read(notifier_sem);
		return -EAGAIN;
	}

	npages = xe_npages_in_range(range->start, range->end);
	for (i = 0; i < npages;) {
		unsigned long len;

		hmm_pfn = range->hmm_pfns[i];
		xe_assert(xe, hmm_pfn & HMM_PFN_VALID);

		len = 1UL << hmm_pfn_to_map_order(hmm_pfn);

		/* If order > 0 the page may extend beyond range->start */
		len -= (hmm_pfn & ~HMM_PFN_FLAGS) & (len - 1);
		i += len;
		num_chunks++;
	}
	up_read(notifier_sem);

	return sg_alloc_table(st, num_chunks, GFP_KERNEL);
}

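/*
 * Worked example for the chunk counting above (illustrative, assuming 4K
 * base pages): if range->start lands 16 pages into a 2M huge page,
 * hmm_pfn_to_map_order() returns 9, so len starts at 512 pages. The
 * first pfn's offset within the huge page (16) is then subtracted,
 * leaving one 496-page chunk for the remainder of that huge page; the
 * next loop iteration continues at the following huge-page boundary.
 */
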
/**
 * xe_build_sg() - build a scatter-gather table for all the physical pages/pfns
 * in a hmm_range. dma-map the pages if necessary. The dma-addresses are saved
 * in the sg table and will later be used to program the GPU page table.
 * @xe: the xe device that will access the dma-addresses in the sg table
 * @range: the hmm range from which we build the sg table. range->hmm_pfns[]
 * holds the pfns of the pages that back this hmm address range.
 * @st: pointer to the sg table.
 * @notifier_sem: The xe notifier lock.
 * @write: whether we write to this range. This decides the dma map direction
 * for system pages: if we write, we map bidirectional; otherwise
 * DMA_TO_DEVICE.
 *
 * All contiguous pfns are collapsed into one entry in the scatter-gather
 * table, so that the GPU page table can be programmed efficiently.
 *
 * The dma_address in the sg table will later be used by the GPU to access
 * memory. So if the memory is system memory, we need to dma-map it so the
 * GPU/DMA can access it.
 *
 * FIXME: This function currently only supports pages in system memory. If
 * the memory is GPU local memory (of the GPU that is going to access it),
 * we need the GPU dpa (device physical address) instead, and no dma-mapping
 * is needed. This is TBD.
 *
 * FIXME: dma-mapping for a peer GPU device to access a remote GPU's memory.
 * Add this when p2p is supported.
 *
 * This function allocates the storage for the sg table. It is the caller's
 * responsibility to free it by calling sg_free_table().
 *
 * Returns 0 on success, or a negative error code on failure.
 */
static int xe_build_sg(struct xe_device *xe, struct hmm_range *range,
		       struct sg_table *st,
		       struct rw_semaphore *notifier_sem,
		       bool write)
{
	unsigned long npages = xe_npages_in_range(range->start, range->end);
	struct device *dev = xe->drm.dev;
	struct scatterlist *sgl;
	struct page *page;
	unsigned long i, j;

	lockdep_assert_held(notifier_sem);

	i = 0;
	for_each_sg(st->sgl, sgl, st->nents, j) {
		unsigned long hmm_pfn, size;

		hmm_pfn = range->hmm_pfns[i];
		page = hmm_pfn_to_page(hmm_pfn);
		xe_assert(xe, !is_device_private_page(page));

		size = 1UL << hmm_pfn_to_map_order(hmm_pfn);
		size -= page_to_pfn(page) & (size - 1);
		i += size;

		if (unlikely(j == st->nents - 1)) {
			if (i > npages)
				size -= (i - npages);
			sg_mark_end(sgl);
		}
		sg_set_page(sgl, page, size << PAGE_SHIFT, 0);
	}
	xe_assert(xe, i == npages);

	return dma_map_sgtable(dev, st, write ? DMA_BIDIRECTIONAL : DMA_TO_DEVICE,
			       DMA_ATTR_SKIP_CPU_SYNC | DMA_ATTR_NO_KERNEL_MAPPING);
}

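/*
 * Minimal sketch of how a caller might walk the dma-mapped table built
 * above when programming GPU page-table entries (illustrative only; the
 * PTE-encoding helper named here is hypothetical, not part of this file):
 *
 *	struct scatterlist *sg;
 *	unsigned int nr;
 *
 *	for_each_sgtable_dma_sg(st, sg, nr)
 *		emit_gpu_ptes(sg_dma_address(sg), sg_dma_len(sg));
 */
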
/* Mark the userptr as dma-mapped, serialized against xe_hmm_userptr_unmap(). */
static void xe_hmm_userptr_set_mapped(struct xe_userptr_vma *uvma)
{
	struct xe_userptr *userptr = &uvma->userptr;
	struct xe_vm *vm = xe_vma_vm(&uvma->vma);

	lockdep_assert_held_write(&vm->lock);
	lockdep_assert_held(&vm->userptr.notifier_lock);

	mutex_lock(&userptr->unmap_mutex);
	xe_assert(vm->xe, !userptr->mapped);
	userptr->mapped = true;
	mutex_unlock(&userptr->unmap_mutex);
}

/* Dma-unmap the userptr sg table, if it is currently mapped. */
void xe_hmm_userptr_unmap(struct xe_userptr_vma *uvma)
{
	struct xe_userptr *userptr = &uvma->userptr;
	struct xe_vma *vma = &uvma->vma;
	bool write = !xe_vma_read_only(vma);
	struct xe_vm *vm = xe_vma_vm(vma);
	struct xe_device *xe = vm->xe;

	if (!lockdep_is_held_type(&vm->userptr.notifier_lock, 0) &&
	    !lockdep_is_held_type(&vm->lock, 0) &&
	    !(vma->gpuva.flags & XE_VMA_DESTROYED)) {
		/* Don't unmap in exec critical section. */
		xe_vm_assert_held(vm);
		/* Don't unmap while mapping the sg. */
		lockdep_assert_held(&vm->lock);
	}

	mutex_lock(&userptr->unmap_mutex);
	if (userptr->sg && userptr->mapped)
		dma_unmap_sgtable(xe->drm.dev, userptr->sg,
				  write ? DMA_BIDIRECTIONAL : DMA_TO_DEVICE, 0);
	userptr->mapped = false;
	mutex_unlock(&userptr->unmap_mutex);
}

/**
 * xe_hmm_userptr_free_sg() - Free the scatter-gather table of a userptr
 * @uvma: the userptr vma that holds the scatter-gather table
 *
 * xe_hmm_userptr_populate_range() allocates the storage for the userptr
 * sg table. This helper dma-unmaps the addresses in that table and then
 * frees it.
 */
void xe_hmm_userptr_free_sg(struct xe_userptr_vma *uvma)
{
	struct xe_userptr *userptr = &uvma->userptr;

	xe_assert(xe_vma_vm(&uvma->vma)->xe, userptr->sg);
	xe_hmm_userptr_unmap(uvma);
	sg_free_table(userptr->sg);
	userptr->sg = NULL;
}

/**
 * xe_hmm_userptr_populate_range() - Populate physical pages of a virtual
 * address range
 *
 * @uvma: userptr vma which has information of the range to populate.
 * @is_mm_mmap_locked: True if mmap_read_lock is already acquired by caller.
 *
 * This function populates the physical pages of a virtual address range.
 * The populated physical pages are saved in the userptr's sg table. It is
 * similar to get_user_pages() but calls hmm_range_fault().
 *
 * This function also reads the mmu notifier sequence number
 * (mmu_interval_read_begin()), for the purpose of later comparison
 * (through mmu_interval_read_retry()).
 *
 * This must be called with the mmap read or write lock held.
 *
 * This function allocates the storage for the userptr sg table. It is the
 * caller's responsibility to free it by calling sg_free_table().
 *
 * Returns: 0 on success; a negative error code on failure
 */
int xe_hmm_userptr_populate_range(struct xe_userptr_vma *uvma,
				  bool is_mm_mmap_locked)
{
	unsigned long timeout =
		jiffies + msecs_to_jiffies(HMM_RANGE_DEFAULT_TIMEOUT);
	unsigned long *pfns;
	struct xe_userptr *userptr;
	struct xe_vma *vma = &uvma->vma;
	u64 userptr_start = xe_vma_userptr(vma);
	u64 userptr_end = userptr_start + xe_vma_size(vma);
	struct xe_vm *vm = xe_vma_vm(vma);
	struct hmm_range hmm_range = {
		.pfn_flags_mask = 0, /* ignore pfns */
		.default_flags = HMM_PFN_REQ_FAULT,
		.start = userptr_start,
		.end = userptr_end,
		.notifier = &uvma->userptr.notifier,
		.dev_private_owner = vm->xe,
	};
	bool write = !xe_vma_read_only(vma);
	unsigned long notifier_seq;
	u64 npages;
	int ret;

	userptr = &uvma->userptr;

	if (is_mm_mmap_locked)
		mmap_assert_locked(userptr->notifier.mm);

	if (vma->gpuva.flags & XE_VMA_DESTROYED)
		return 0;

	/* The existing pages are still valid if the sequence number hasn't moved. */
	notifier_seq = mmu_interval_read_begin(&userptr->notifier);
	if (notifier_seq == userptr->notifier_seq)
		return 0;

	if (userptr->sg)
		xe_hmm_userptr_free_sg(uvma);

	npages = xe_npages_in_range(userptr_start, userptr_end);
	pfns = kvmalloc_array(npages, sizeof(*pfns), GFP_KERNEL);
	if (unlikely(!pfns))
		return -ENOMEM;

	if (write)
		hmm_range.default_flags |= HMM_PFN_REQ_WRITE;

	if (!mmget_not_zero(userptr->notifier.mm)) {
		ret = -EFAULT;
		goto free_pfns;
	}

	hmm_range.hmm_pfns = pfns;

	/*
	 * hmm_range_fault() returns -EBUSY when the notifier sequence has
	 * been invalidated in the meantime; retry until it succeeds or the
	 * timeout expires.
	 */
	while (true) {
		hmm_range.notifier_seq = mmu_interval_read_begin(&userptr->notifier);

		if (!is_mm_mmap_locked)
			mmap_read_lock(userptr->notifier.mm);

		ret = hmm_range_fault(&hmm_range);

		if (!is_mm_mmap_locked)
			mmap_read_unlock(userptr->notifier.mm);

		if (ret == -EBUSY) {
			if (time_after(jiffies, timeout))
				break;

			continue;
		}
		break;
	}

	mmput(userptr->notifier.mm);

	if (ret)
		goto free_pfns;

	ret = xe_alloc_sg(vm->xe, &userptr->sgt, &hmm_range, &vm->userptr.notifier_lock);
	if (ret)
		goto free_pfns;

	ret = down_read_interruptible(&vm->userptr.notifier_lock);
	if (ret)
		goto free_st;

	if (mmu_interval_read_retry(hmm_range.notifier, hmm_range.notifier_seq)) {
		ret = -EAGAIN;
		goto out_unlock;
	}

	ret = xe_build_sg(vm->xe, &hmm_range, &userptr->sgt,
			  &vm->userptr.notifier_lock, write);
	if (ret)
		goto out_unlock;

	xe_mark_range_accessed(&hmm_range, write);
	userptr->sg = &userptr->sgt;
	xe_hmm_userptr_set_mapped(uvma);
	userptr->notifier_seq = hmm_range.notifier_seq;
	up_read(&vm->userptr.notifier_lock);
	kvfree(pfns);
	return 0;

out_unlock:
	up_read(&vm->userptr.notifier_lock);
free_st:
	sg_free_table(&userptr->sgt);
free_pfns:
	kvfree(pfns);
	return ret;
}
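
/*
 * Minimal sketch of how a caller might drive the populate cycle
 * (illustrative only; this caller is not part of this file). -EAGAIN
 * means the mmu notifier invalidated the range while the sg table was
 * being built, so the caller simply repopulates:
 *
 *	int err;
 *
 *	do {
 *		err = xe_hmm_userptr_populate_range(uvma, false);
 *	} while (err == -EAGAIN);
 *	if (err)
 *		return err;
 */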