// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright 2014 IBM Corp.
 */

#include <linux/workqueue.h>
#include <linux/sched/signal.h>
#include <linux/sched/mm.h>
#include <linux/pid.h>
#include <linux/mm.h>
#include <linux/moduleparam.h>

#undef MODULE_PARAM_PREFIX
#define MODULE_PARAM_PREFIX "cxl" "."
#include <asm/current.h>
#include <asm/copro.h>
#include <asm/mmu.h>

#include "cxl.h"
#include "trace.h"

static bool sste_matches(struct cxl_sste *sste, struct copro_slb *slb)
{
	return (sste->vsid_data == cpu_to_be64(slb->vsid)) &&
	       (sste->esid_data == cpu_to_be64(slb->esid));
}

/*
 * This finds a free SSTE for the given SLB, or returns NULL if it's already in
 * the segment table.
 */
static struct cxl_sste *find_free_sste(struct cxl_context *ctx,
				       struct copro_slb *slb)
{
	struct cxl_sste *primary, *sste, *ret = NULL;
	unsigned int mask = (ctx->sst_size >> 7) - 1; /* SSTP0[SegTableSize] */
	unsigned int entry;
	unsigned int hash;

	if (slb->vsid & SLB_VSID_B_1T)
		hash = (slb->esid >> SID_SHIFT_1T) & mask;
	else /* 256M */
		hash = (slb->esid >> SID_SHIFT) & mask;

	primary = ctx->sstp + (hash << 3);

	for (entry = 0, sste = primary; entry < 8; entry++, sste++) {
		if (!ret && !(be64_to_cpu(sste->esid_data) & SLB_ESID_V))
			ret = sste;
		if (sste_matches(sste, slb))
			return NULL;
	}
	if (ret)
		return ret;

	/* Nothing free, select an entry to cast out */
	ret = primary + ctx->sst_lru;
	ctx->sst_lru = (ctx->sst_lru + 1) & 0x7;

	return ret;
}

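/*
 * Insert the SLB entry for this EA into the context's segment table,
 * evicting an existing group entry round-robin (via ctx->sst_lru) if
 * the group is full. The table is written under ctx->sste_lock so it
 * stays consistent with concurrent faults.
 */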
static void cxl_load_segment(struct cxl_context *ctx, struct copro_slb *slb)
{
	struct cxl_sste *sste;
	unsigned long flags;

	spin_lock_irqsave(&ctx->sste_lock, flags);
	sste = find_free_sste(ctx, slb);
	if (!sste)
		goto out_unlock;

	pr_devel("CXL Populating SST[%li]: %#llx %#llx\n",
		 sste - ctx->sstp, slb->vsid, slb->esid);
	trace_cxl_ste_write(ctx, sste - ctx->sstp, slb->esid, slb->vsid);

	sste->vsid_data = cpu_to_be64(slb->vsid);
	sste->esid_data = cpu_to_be64(slb->esid);
out_unlock:
	spin_unlock_irqrestore(&ctx->sste_lock, flags);
}

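/*
 * Resolve a segment fault for effective address @ea: compute the SLB
 * entry with copro_calculate_slb() and, on success, load it into the
 * context's segment table.
 */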
static int cxl_fault_segment(struct cxl_context *ctx, struct mm_struct *mm,
			     u64 ea)
{
	struct copro_slb slb = {0, 0};
	int rc;

	rc = copro_calculate_slb(mm, ea, &slb);
	if (!rc)
		cxl_load_segment(ctx, &slb);

	return rc;
}

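/*
 * Acknowledge the interrupt with Address Error and record the faulting
 * DAR/DSISR on the context, so anyone waiting on ctx->wq can observe
 * the failed translation.
 */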
static void cxl_ack_ae(struct cxl_context *ctx)
{
	unsigned long flags;

	cxl_ops->ack_irq(ctx, CXL_PSL_TFC_An_AE, 0);

	spin_lock_irqsave(&ctx->lock, flags);
	ctx->pending_fault = true;
	ctx->fault_addr = ctx->dar;
	ctx->fault_dsisr = ctx->dsisr;
	spin_unlock_irqrestore(&ctx->lock, flags);

	wake_up_all(&ctx->wq);
}

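/*
 * Bottom-half handling for a segment (SLB) miss: populate the segment
 * table entry and restart the translation, or ack with an address
 * error if the SLB entry could not be calculated.
 */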
static int cxl_handle_segment_miss(struct cxl_context *ctx,
				   struct mm_struct *mm, u64 ea)
{
	int rc;

	pr_devel("CXL interrupt: Segment fault pe: %i ea: %#llx\n", ctx->pe, ea);
	trace_cxl_ste_miss(ctx, ea);

	rc = cxl_fault_segment(ctx, mm, ea);
	if (rc) {
		cxl_ack_ae(ctx);
	} else {
		mb(); /* Order seg table write to TFC MMIO write */
		cxl_ops->ack_irq(ctx, CXL_PSL_TFC_An_R, 0);
	}

	return IRQ_HANDLED;
}

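/*
 * Fault in the page backing @dar on behalf of the AFU. On hash MMUs we
 * also preload the HPTE ourselves, so the PSL's retried access can
 * succeed without another fault.
 */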
int cxl_handle_mm_fault(struct mm_struct *mm, u64 dsisr, u64 dar)
{
	vm_fault_t flt = 0;
	int result;
	unsigned long access, flags, inv_flags = 0;

	/*
	 * Add the fault handling cpu to task mm cpumask so that we
	 * can do a safe lockless page table walk when inserting the
	 * hash page table entry. This function gets called with a
	 * valid mm for user space addresses. Hence the if (mm)
	 * check is sufficient here.
	 */
	if (mm && !cpumask_test_cpu(smp_processor_id(), mm_cpumask(mm))) {
		cpumask_set_cpu(smp_processor_id(), mm_cpumask(mm));
		/*
		 * We need to make sure we walk the table only after
		 * we update the cpumask. The other side of the barrier
		 * is explained in serialize_against_pte_lookup().
		 */
		smp_mb();
	}

	result = copro_handle_mm_fault(mm, dar, dsisr, &flt);
	if (result) {
		pr_devel("copro_handle_mm_fault failed: %#x\n", result);
		return result;
	}

	if (!radix_enabled()) {
		/*
		 * update_mmu_cache() will not have loaded the hash since
		 * current->trap is not a 0x400 or 0x300, so just call
		 * hash_page_mm() here.
		 */
		access = _PAGE_PRESENT | _PAGE_READ;
		if (dsisr & CXL_PSL_DSISR_An_S)
			access |= _PAGE_WRITE;

		if (!mm && (get_region_id(dar) != USER_REGION_ID))
			access |= _PAGE_PRIVILEGED;

		if (dsisr & DSISR_NOHPTE)
			inv_flags |= HPTE_NOHPTE_UPDATE;

		local_irq_save(flags);
		hash_page_mm(mm, dar, access, 0x300, inv_flags);
		local_irq_restore(flags);
	}
	return 0;
}

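/*
 * Bottom-half handling for a page fault: resolve it through
 * cxl_handle_mm_fault() and restart the translation, or ack with an
 * address error on failure.
 */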
static void cxl_handle_page_fault(struct cxl_context *ctx,
				  struct mm_struct *mm,
				  u64 dsisr, u64 dar)
{
	trace_cxl_pte_miss(ctx, dsisr, dar);

	if (cxl_handle_mm_fault(mm, dsisr, dar)) {
		cxl_ack_ae(ctx);
	} else {
		pr_devel("Page fault successfully handled for pe: %i!\n", ctx->pe);
		cxl_ops->ack_irq(ctx, CXL_PSL_TFC_An_R, 0);
	}
}

/*
 * Returns the mm_struct corresponding to the context ctx, or NULL if
 * there is none or if mm_users has already dropped to zero (the
 * context may be in the process of being closed).
 */
static struct mm_struct *get_mem_context(struct cxl_context *ctx)
{
	if (ctx->mm == NULL)
		return NULL;

	if (!mmget_not_zero(ctx->mm))
		return NULL;

	return ctx->mm;
}

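/*
 * Only POWER8 contexts take segment misses (DSISR[DS]); cxl_is_power9()
 * contexts never do, so this always returns false there.
 */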
static bool cxl_is_segment_miss(struct cxl_context *ctx, u64 dsisr)
{
	return cxl_is_power8() && (dsisr & CXL_PSL_DSISR_An_DS);
}

static bool cxl_is_page_fault(struct cxl_context *ctx, u64 dsisr)
{
	if (cxl_is_power8() && (dsisr & CXL_PSL_DSISR_An_DM))
		return true;

	if (cxl_is_power9())
		return true;

	return false;
}

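/*
 * Work-queue bottom half for PSL translation faults: on bare-metal,
 * check that the fault registers still match the snapshot taken in the
 * top half, grab the context's mm (for user contexts) and dispatch to
 * the segment miss or page fault handler.
 */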
void cxl_handle_fault(struct work_struct *fault_work)
{
	struct cxl_context *ctx =
		container_of(fault_work, struct cxl_context, fault_work);
	u64 dsisr = ctx->dsisr;
	u64 dar = ctx->dar;
	struct mm_struct *mm = NULL;

	if (cpu_has_feature(CPU_FTR_HVMODE)) {
		if (cxl_p2n_read(ctx->afu, CXL_PSL_DSISR_An) != dsisr ||
		    cxl_p2n_read(ctx->afu, CXL_PSL_DAR_An) != dar ||
		    cxl_p2n_read(ctx->afu, CXL_PSL_PEHandle_An) != ctx->pe) {
			/*
			 * Most likely explanation is harmless - a dedicated
			 * process has detached and these were cleared by the
			 * PSL purge, but warn about it just in case.
			 */
			dev_notice(&ctx->afu->dev, "cxl_handle_fault: Translation fault regs changed\n");
			return;
		}
	}

	/* Early return if the context is being / has been detached */
	if (ctx->status == CLOSED) {
		cxl_ack_ae(ctx);
		return;
	}

	pr_devel("CXL BOTTOM HALF handling fault for afu pe: %i. "
		 "DSISR: %#llx DAR: %#llx\n", ctx->pe, dsisr, dar);

	if (!ctx->kernel) {
		mm = get_mem_context(ctx);
		if (mm == NULL) {
			pr_devel("%s: unable to get mm for pe=%d pid=%i\n",
				 __func__, ctx->pe, pid_nr(ctx->pid));
			cxl_ack_ae(ctx);
			return;
		}
		pr_devel("Handling page fault for pe=%d pid=%i\n",
			 ctx->pe, pid_nr(ctx->pid));
	}

	if (cxl_is_segment_miss(ctx, dsisr))
		cxl_handle_segment_miss(ctx, mm, dar);
	else if (cxl_is_page_fault(ctx, dsisr))
		cxl_handle_page_fault(ctx, mm, dsisr, dar);
	else
		WARN(1, "cxl_handle_fault has nothing to handle\n");

	if (mm)
		mmput(mm);
}

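/* Round @ea up to the start of the next 256M or 1T segment. */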
static u64 next_segment(u64 ea, u64 vsid)
{
	if (vsid & SLB_VSID_B_1T)
		ea |= (1ULL << 40) - 1;
	else
		ea |= (1ULL << 28) - 1;

	return ea + 1;
}

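/*
 * Walk every VMA in the mm and preload one segment table entry per
 * segment it touches, skipping consecutive addresses that share an
 * ESID.
 */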
static void cxl_prefault_vma(struct cxl_context *ctx, struct mm_struct *mm)
{
	u64 ea, last_esid = 0;
	/*
	 * Zero-initialise so next_segment() sees a sane (256M) vsid even
	 * if copro_calculate_slb() fails on the very first address.
	 */
	struct copro_slb slb = {0, 0};
	VMA_ITERATOR(vmi, mm, 0);
	struct vm_area_struct *vma;
	int rc;

	mmap_read_lock(mm);
	for_each_vma(vmi, vma) {
		for (ea = vma->vm_start; ea < vma->vm_end;
		     ea = next_segment(ea, slb.vsid)) {
			rc = copro_calculate_slb(mm, ea, &slb);
			if (rc)
				continue;

			if (last_esid == slb.esid)
				continue;

			cxl_load_segment(ctx, &slb);
			last_esid = slb.esid;
		}
	}
	mmap_read_unlock(mm);
}

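/*
 * Optionally warm the segment table at context start: either just the
 * segment containing the WED, or every segment in the address space,
 * depending on ctx->afu->prefault_mode.
 */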
void cxl_prefault(struct cxl_context *ctx, u64 wed)
{
	struct mm_struct *mm = get_mem_context(ctx);

	if (mm == NULL) {
		pr_devel("cxl_prefault unable to get mm %i\n",
			 pid_nr(ctx->pid));
		return;
	}

	switch (ctx->afu->prefault_mode) {
	case CXL_PREFAULT_WED:
		cxl_fault_segment(ctx, mm, wed);
		break;
	case CXL_PREFAULT_ALL:
		cxl_prefault_vma(ctx, mm);
		break;
	default:
		break;
	}

	mmput(mm);
}