/*
 * Copyright 2022 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#include "amdgpu.h"
#include "amdgpu_xcp.h"
#include "amdgpu_drv.h"

#include <drm/drm_drv.h>
#include "../amdxcp/amdgpu_xcp_drv.h"

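/*
 * Invoke the per-IP callback matching the requested transition state
 * (prepare_suspend/suspend/prepare_resume/resume) on one IP block of a
 * partition. Returns 0 when the block has no callback registered.
 */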
static int __amdgpu_xcp_run(struct amdgpu_xcp_mgr *xcp_mgr,
			    struct amdgpu_xcp_ip *xcp_ip, int xcp_state)
{
	int (*run_func)(void *handle, uint32_t inst_mask);
	int ret = 0;

	if (!xcp_ip || !xcp_ip->valid || !xcp_ip->ip_funcs)
		return 0;

	run_func = NULL;

	switch (xcp_state) {
	case AMDGPU_XCP_PREPARE_SUSPEND:
		run_func = xcp_ip->ip_funcs->prepare_suspend;
		break;
	case AMDGPU_XCP_SUSPEND:
		run_func = xcp_ip->ip_funcs->suspend;
		break;
	case AMDGPU_XCP_PREPARE_RESUME:
		run_func = xcp_ip->ip_funcs->prepare_resume;
		break;
	case AMDGPU_XCP_RESUME:
		run_func = xcp_ip->ip_funcs->resume;
		break;
	}

	if (run_func)
		ret = run_func(xcp_mgr->adev, xcp_ip->inst_mask);

	return ret;
}

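/*
 * Walk every IP block of partition @xcp_id and run the given transition
 * state on each, stopping at the first failure.
 */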
static int amdgpu_xcp_run_transition(struct amdgpu_xcp_mgr *xcp_mgr, int xcp_id,
				     int state)
{
	struct amdgpu_xcp_ip *xcp_ip;
	struct amdgpu_xcp *xcp;
	int i, ret;

	if (xcp_id >= MAX_XCP || !xcp_mgr->xcp[xcp_id].valid)
		return -EINVAL;

	xcp = &xcp_mgr->xcp[xcp_id];
	for (i = 0; i < AMDGPU_XCP_MAX_BLOCKS; ++i) {
		xcp_ip = &xcp->ip[i];
		ret = __amdgpu_xcp_run(xcp_mgr, xcp_ip, state);
		if (ret)
			break;
	}

	return ret;
}

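/* Thin wrappers that drive one partition through each power transition. */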
int amdgpu_xcp_prepare_suspend(struct amdgpu_xcp_mgr *xcp_mgr, int xcp_id)
{
	return amdgpu_xcp_run_transition(xcp_mgr, xcp_id,
					 AMDGPU_XCP_PREPARE_SUSPEND);
}

int amdgpu_xcp_suspend(struct amdgpu_xcp_mgr *xcp_mgr, int xcp_id)
{
	return amdgpu_xcp_run_transition(xcp_mgr, xcp_id, AMDGPU_XCP_SUSPEND);
}

int amdgpu_xcp_prepare_resume(struct amdgpu_xcp_mgr *xcp_mgr, int xcp_id)
{
	return amdgpu_xcp_run_transition(xcp_mgr, xcp_id,
					 AMDGPU_XCP_PREPARE_RESUME);
}

int amdgpu_xcp_resume(struct amdgpu_xcp_mgr *xcp_mgr, int xcp_id)
{
	return amdgpu_xcp_run_transition(xcp_mgr, xcp_id, AMDGPU_XCP_RESUME);
}

static void __amdgpu_xcp_add_block(struct amdgpu_xcp_mgr *xcp_mgr, int xcp_id,
				   struct amdgpu_xcp_ip *ip)
{
	struct amdgpu_xcp *xcp;

	if (!ip)
		return;

	xcp = &xcp_mgr->xcp[xcp_id];
	xcp->ip[ip->ip_id] = *ip;
	xcp->ip[ip->ip_id].valid = true;

	xcp->valid = true;
}

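/*
 * (Re)build the partition table for @num_xcps partitions: invalidate all
 * slots, query per-IP details from the hw-specific callbacks, resolve the
 * memory partition each XCP maps to, then refresh the scheduler lists.
 */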
int amdgpu_xcp_init(struct amdgpu_xcp_mgr *xcp_mgr, int num_xcps, int mode)
{
	struct amdgpu_device *adev = xcp_mgr->adev;
	struct amdgpu_xcp_ip ip;
	uint8_t mem_id;
	int i, j, ret;

	if (!num_xcps || num_xcps > MAX_XCP)
		return -EINVAL;

	xcp_mgr->mode = mode;

	for (i = 0; i < MAX_XCP; ++i)
		xcp_mgr->xcp[i].valid = false;

	/* This is needed for figuring out memory id of xcp */
	xcp_mgr->num_xcp_per_mem_partition = num_xcps / xcp_mgr->adev->gmc.num_mem_partitions;

	for (i = 0; i < num_xcps; ++i) {
		for (j = AMDGPU_XCP_GFXHUB; j < AMDGPU_XCP_MAX_BLOCKS; ++j) {
			ret = xcp_mgr->funcs->get_ip_details(xcp_mgr, i, j,
							     &ip);
			if (ret)
				continue;

			__amdgpu_xcp_add_block(xcp_mgr, i, &ip);
		}

		xcp_mgr->xcp[i].id = i;

		if (xcp_mgr->funcs->get_xcp_mem_id) {
			ret = xcp_mgr->funcs->get_xcp_mem_id(
				xcp_mgr, &xcp_mgr->xcp[i], &mem_id);
			if (ret)
				continue;
			else
				xcp_mgr->xcp[i].mem_id = mem_id;
		}
	}

	xcp_mgr->num_xcps = num_xcps;
	amdgpu_xcp_update_partition_sched_list(adev);

	return 0;
}

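/*
 * Perform the actual mode switch under xcp_lock. The cached mode is parked
 * at AMDGPU_XCP_MODE_TRANS while the hw-specific callback runs; on failure
 * it is restored from a fresh query (or the previous value).
 */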
static int __amdgpu_xcp_switch_partition_mode(struct amdgpu_xcp_mgr *xcp_mgr,
					      int mode)
{
	int ret, curr_mode, num_xcps = 0;

	if (!xcp_mgr->funcs || !xcp_mgr->funcs->switch_partition_mode)
		return 0;

	mutex_lock(&xcp_mgr->xcp_lock);

	curr_mode = xcp_mgr->mode;
	/* State set to transient mode */
	xcp_mgr->mode = AMDGPU_XCP_MODE_TRANS;

	ret = xcp_mgr->funcs->switch_partition_mode(xcp_mgr, mode, &num_xcps);

	if (ret) {
		/* Failed, get whatever mode it's at now */
		if (xcp_mgr->funcs->query_partition_mode)
			xcp_mgr->mode = amdgpu_xcp_query_partition_mode(
				xcp_mgr, AMDGPU_XCP_FL_LOCKED);
		else
			xcp_mgr->mode = curr_mode;

		goto out;
	}

out:
	mutex_unlock(&xcp_mgr->xcp_lock);

	return ret;
}

int amdgpu_xcp_switch_partition_mode(struct amdgpu_xcp_mgr *xcp_mgr, int mode)
{
	if (!xcp_mgr || mode == AMDGPU_XCP_MODE_NONE)
		return -EINVAL;

	if (xcp_mgr->mode == mode)
		return 0;

	return __amdgpu_xcp_switch_partition_mode(xcp_mgr, mode);
}

int amdgpu_xcp_restore_partition_mode(struct amdgpu_xcp_mgr *xcp_mgr)
{
	if (!xcp_mgr || xcp_mgr->mode == AMDGPU_XCP_MODE_NONE)
		return 0;

	return __amdgpu_xcp_switch_partition_mode(xcp_mgr, xcp_mgr->mode);
}

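/*
 * Read the partition mode back from the hardware and sanity-check it
 * against the cached mode. Pass AMDGPU_XCP_FL_LOCKED when the caller
 * already holds xcp_lock. For SR-IOV VFs the first query seeds the cache.
 */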
int amdgpu_xcp_query_partition_mode(struct amdgpu_xcp_mgr *xcp_mgr, u32 flags)
{
	int mode;

	if (!amdgpu_sriov_vf(xcp_mgr->adev) &&
	    xcp_mgr->mode == AMDGPU_XCP_MODE_NONE)
		return xcp_mgr->mode;

	if (!xcp_mgr->funcs || !xcp_mgr->funcs->query_partition_mode)
		return xcp_mgr->mode;

	if (!(flags & AMDGPU_XCP_FL_LOCKED))
		mutex_lock(&xcp_mgr->xcp_lock);
	mode = xcp_mgr->funcs->query_partition_mode(xcp_mgr);

	/* First time query for VF, set the mode here */
	if (amdgpu_sriov_vf(xcp_mgr->adev) &&
	    xcp_mgr->mode == AMDGPU_XCP_MODE_NONE)
		xcp_mgr->mode = mode;

	if (xcp_mgr->mode != AMDGPU_XCP_MODE_TRANS && mode != xcp_mgr->mode)
		dev_WARN(
			xcp_mgr->adev->dev,
			"Cached partition mode %d not matching with device mode %d",
			xcp_mgr->mode, mode);

	if (!(flags & AMDGPU_XCP_FL_LOCKED))
		mutex_unlock(&xcp_mgr->xcp_lock);

	return mode;
}

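/*
 * Allocate one extra DRM node per partition beyond the first. Each node's
 * render/primary device pointers, driver and VMA offset manager are saved
 * and then redirected to the parent device, so every partition node funnels
 * its IOCTLs through the same adev.
 */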
static int amdgpu_xcp_dev_alloc(struct amdgpu_device *adev)
{
	struct drm_device *p_ddev;
	struct drm_device *ddev;
	int i, ret;

	ddev = adev_to_drm(adev);

	/* xcp #0 shares drm device setting with adev */
	adev->xcp_mgr->xcp->ddev = ddev;

	for (i = 1; i < MAX_XCP; i++) {
		ret = amdgpu_xcp_drm_dev_alloc(&p_ddev);
		if (ret == -ENOSPC) {
			dev_warn(adev->dev,
				 "Skip xcp node #%d when out of drm node resource.", i);
			return 0;
		} else if (ret) {
			return ret;
		}

		/* Redirect all IOCTLs to the primary device */
		adev->xcp_mgr->xcp[i].rdev = p_ddev->render->dev;
		adev->xcp_mgr->xcp[i].pdev = p_ddev->primary->dev;
		adev->xcp_mgr->xcp[i].driver = (struct drm_driver *)p_ddev->driver;
		adev->xcp_mgr->xcp[i].vma_offset_manager = p_ddev->vma_offset_manager;
		p_ddev->render->dev = ddev;
		p_ddev->primary->dev = ddev;
		p_ddev->vma_offset_manager = ddev->vma_offset_manager;
		p_ddev->driver = &amdgpu_partition_driver;
		adev->xcp_mgr->xcp[i].ddev = p_ddev;
	}

	return 0;
}

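/*
 * Allocate and wire up the partition manager. Partition state is only
 * initialized here when a concrete mode is already known; otherwise it is
 * deferred until the mode is first queried or switched.
 */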
int amdgpu_xcp_mgr_init(struct amdgpu_device *adev, int init_mode,
			int init_num_xcps,
			struct amdgpu_xcp_mgr_funcs *xcp_funcs)
{
	struct amdgpu_xcp_mgr *xcp_mgr;

	if (!xcp_funcs || !xcp_funcs->get_ip_details)
		return -EINVAL;

	xcp_mgr = kzalloc(sizeof(*xcp_mgr), GFP_KERNEL);

	if (!xcp_mgr)
		return -ENOMEM;

	xcp_mgr->adev = adev;
	xcp_mgr->funcs = xcp_funcs;
	xcp_mgr->mode = init_mode;
	mutex_init(&xcp_mgr->xcp_lock);

	if (init_mode != AMDGPU_XCP_MODE_NONE)
		amdgpu_xcp_init(xcp_mgr, init_num_xcps, init_mode);

	adev->xcp_mgr = xcp_mgr;

	return amdgpu_xcp_dev_alloc(adev);
}

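/*
 * Return a bitmask of partition ids whose IP block @ip contains hardware
 * instance @instance, or -ENXIO when no partition matches.
 */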
int amdgpu_xcp_get_partition(struct amdgpu_xcp_mgr *xcp_mgr,
			     enum AMDGPU_XCP_IP_BLOCK ip, int instance)
{
	struct amdgpu_xcp *xcp;
	int i, id_mask = 0;

	if (ip >= AMDGPU_XCP_MAX_BLOCKS)
		return -EINVAL;

	for (i = 0; i < xcp_mgr->num_xcps; ++i) {
		xcp = &xcp_mgr->xcp[i];
		if ((xcp->valid) && (xcp->ip[ip].valid) &&
		    (xcp->ip[ip].inst_mask & BIT(instance)))
			id_mask |= BIT(i);
	}

	if (!id_mask)
		id_mask = -ENXIO;

	return id_mask;
}

int amdgpu_xcp_get_inst_details(struct amdgpu_xcp *xcp,
				enum AMDGPU_XCP_IP_BLOCK ip,
				uint32_t *inst_mask)
{
	if (!xcp->valid || !inst_mask || !(xcp->ip[ip].valid))
		return -EINVAL;

	*inst_mask = xcp->ip[ip].inst_mask;

	return 0;
}

int amdgpu_xcp_dev_register(struct amdgpu_device *adev,
			    const struct pci_device_id *ent)
{
	int i, ret;

	if (!adev->xcp_mgr)
		return 0;

	for (i = 1; i < MAX_XCP; i++) {
		if (!adev->xcp_mgr->xcp[i].ddev)
			break;

		ret = drm_dev_register(adev->xcp_mgr->xcp[i].ddev, ent->driver_data);
		if (ret)
			return ret;
	}

	return 0;
}

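/*
 * Unplug the per-partition DRM nodes and restore the device pointers,
 * driver and VMA offset manager that amdgpu_xcp_dev_alloc() saved, so the
 * nodes can be torn down cleanly.
 */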
void amdgpu_xcp_dev_unplug(struct amdgpu_device *adev)
{
	struct drm_device *p_ddev;
	int i;

	if (!adev->xcp_mgr)
		return;

	for (i = 1; i < MAX_XCP; i++) {
		if (!adev->xcp_mgr->xcp[i].ddev)
			break;

		p_ddev = adev->xcp_mgr->xcp[i].ddev;
		drm_dev_unplug(p_ddev);
		p_ddev->render->dev = adev->xcp_mgr->xcp[i].rdev;
		p_ddev->primary->dev = adev->xcp_mgr->xcp[i].pdev;
		p_ddev->driver = adev->xcp_mgr->xcp[i].driver;
		p_ddev->vma_offset_manager = adev->xcp_mgr->xcp[i].vma_offset_manager;
	}
}

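/*
 * Resolve which partition a DRM client opened by matching the render minor,
 * and pin the file's VM to that partition's memory id.
 */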
int amdgpu_xcp_open_device(struct amdgpu_device *adev,
			   struct amdgpu_fpriv *fpriv,
			   struct drm_file *file_priv)
{
	int i;

	if (!adev->xcp_mgr)
		return 0;

	fpriv->xcp_id = AMDGPU_XCP_NO_PARTITION;
	for (i = 0; i < MAX_XCP; ++i) {
		if (!adev->xcp_mgr->xcp[i].ddev)
			break;

		if (file_priv->minor == adev->xcp_mgr->xcp[i].ddev->render) {
			if (!adev->xcp_mgr->xcp[i].valid) {
				dev_err(adev->dev, "renderD%d partition %d not valid!",
					file_priv->minor->index, i);
				return -ENOENT;
			}
			dev_dbg(adev->dev, "renderD%d partition %d opened!",
				file_priv->minor->index, i);
			fpriv->xcp_id = i;
			break;
		}
	}

	fpriv->vm.mem_id = fpriv->xcp_id == AMDGPU_XCP_NO_PARTITION ? -1 :
				adev->xcp_mgr->xcp[fpriv->xcp_id].mem_id;
	return 0;
}

void amdgpu_xcp_release_sched(struct amdgpu_device *adev,
			      struct amdgpu_ctx_entity *entity)
{
	struct drm_gpu_scheduler *sched;
	struct amdgpu_ring *ring;

	if (!adev->xcp_mgr)
		return;

	sched = entity->entity.rq->sched;
	if (sched->ready) {
		ring = to_amdgpu_ring(entity->entity.rq->sched);
		atomic_dec(&adev->xcp_mgr->xcp[ring->xcp_id].ref_cnt);
	}
}

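/*
 * Sysfs plumbing for the per-partition resource view: each resource type
 * (xcc, dma, dec, jpeg) gets a kobject exposing read-only num_inst and
 * num_shared attributes, generated by the macros below.
 */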
#define XCP_CFG_SYSFS_RES_ATTR_SHOW(_name)                         \
	static ssize_t amdgpu_xcp_res_sysfs_##_name##_show(        \
		struct amdgpu_xcp_res_details *xcp_res, char *buf) \
	{                                                          \
		return sysfs_emit(buf, "%d\n", xcp_res->_name);    \
	}

struct amdgpu_xcp_res_sysfs_attribute {
	struct attribute attr;
	ssize_t (*show)(struct amdgpu_xcp_res_details *xcp_res, char *buf);
};

#define XCP_CFG_SYSFS_RES_ATTR(_name)                                        \
	struct amdgpu_xcp_res_sysfs_attribute xcp_res_sysfs_attr_##_name = { \
		.attr = { .name = __stringify(_name), .mode = 0400 },        \
		.show = amdgpu_xcp_res_sysfs_##_name##_show,                 \
	}

XCP_CFG_SYSFS_RES_ATTR_SHOW(num_inst)
XCP_CFG_SYSFS_RES_ATTR(num_inst);
XCP_CFG_SYSFS_RES_ATTR_SHOW(num_shared)
XCP_CFG_SYSFS_RES_ATTR(num_shared);

#define XCP_CFG_SYSFS_RES_ATTR_PTR(_name) xcp_res_sysfs_attr_##_name.attr

static struct attribute *xcp_cfg_res_sysfs_attrs[] = {
	&XCP_CFG_SYSFS_RES_ATTR_PTR(num_inst),
	&XCP_CFG_SYSFS_RES_ATTR_PTR(num_shared), NULL
};

static const char *xcp_desc[] = {
	[AMDGPU_SPX_PARTITION_MODE] = "SPX",
	[AMDGPU_DPX_PARTITION_MODE] = "DPX",
	[AMDGPU_TPX_PARTITION_MODE] = "TPX",
	[AMDGPU_QPX_PARTITION_MODE] = "QPX",
	[AMDGPU_CPX_PARTITION_MODE] = "CPX",
};

static const char *nps_desc[] = {
	[UNKNOWN_MEMORY_PARTITION_MODE] = "UNKNOWN",
	[AMDGPU_NPS1_PARTITION_MODE] = "NPS1",
	[AMDGPU_NPS2_PARTITION_MODE] = "NPS2",
	[AMDGPU_NPS3_PARTITION_MODE] = "NPS3",
	[AMDGPU_NPS4_PARTITION_MODE] = "NPS4",
	[AMDGPU_NPS6_PARTITION_MODE] = "NPS6",
	[AMDGPU_NPS8_PARTITION_MODE] = "NPS8",
};

ATTRIBUTE_GROUPS(xcp_cfg_res_sysfs);

#define to_xcp_attr(x) \
	container_of(x, struct amdgpu_xcp_res_sysfs_attribute, attr)
#define to_xcp_res(x) container_of(x, struct amdgpu_xcp_res_details, kobj)

static ssize_t xcp_cfg_res_sysfs_attr_show(struct kobject *kobj,
					   struct attribute *attr, char *buf)
{
	struct amdgpu_xcp_res_sysfs_attribute *attribute;
	struct amdgpu_xcp_res_details *xcp_res;

	attribute = to_xcp_attr(attr);
	xcp_res = to_xcp_res(kobj);

	if (!attribute->show)
		return -EIO;

	return attribute->show(xcp_res, buf);
}

static const struct sysfs_ops xcp_cfg_res_sysfs_ops = {
	.show = xcp_cfg_res_sysfs_attr_show,
};

static const struct kobj_type xcp_cfg_res_sysfs_ktype = {
	.sysfs_ops = &xcp_cfg_res_sysfs_ops,
	.default_groups = xcp_cfg_res_sysfs_groups,
};

const char *xcp_res_names[] = {
	[AMDGPU_XCP_RES_XCC] = "xcc",
	[AMDGPU_XCP_RES_DMA] = "dma",
	[AMDGPU_XCP_RES_DEC] = "dec",
	[AMDGPU_XCP_RES_JPEG] = "jpeg",
};

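/*
 * Ask the hw-specific backend for the resource layout of @mode; not every
 * generation implements this, hence -EOPNOTSUPP.
 */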
static int amdgpu_xcp_get_res_info(struct amdgpu_xcp_mgr *xcp_mgr,
				   int mode,
				   struct amdgpu_xcp_cfg *xcp_cfg)
{
	if (xcp_mgr->funcs && xcp_mgr->funcs->get_xcp_res_info)
		return xcp_mgr->funcs->get_xcp_res_info(xcp_mgr, mode, xcp_cfg);

	return -EOPNOTSUPP;
}

#define to_xcp_cfg(x) container_of(x, struct amdgpu_xcp_cfg, kobj)
static ssize_t supported_xcp_configs_show(struct kobject *kobj,
					  struct kobj_attribute *attr, char *buf)
{
	struct amdgpu_xcp_cfg *xcp_cfg = to_xcp_cfg(kobj);
	struct amdgpu_xcp_mgr *xcp_mgr = xcp_cfg->xcp_mgr;
	int size = 0, mode;
	char *sep = "";

	if (!xcp_mgr || !xcp_mgr->supp_xcp_modes)
		return sysfs_emit(buf, "Not supported\n");

	for_each_inst(mode, xcp_mgr->supp_xcp_modes) {
		size += sysfs_emit_at(buf, size, "%s%s", sep, xcp_desc[mode]);
		sep = ", ";
	}

	size += sysfs_emit_at(buf, size, "\n");

	return size;
}

static ssize_t supported_nps_configs_show(struct kobject *kobj,
					  struct kobj_attribute *attr, char *buf)
{
	struct amdgpu_xcp_cfg *xcp_cfg = to_xcp_cfg(kobj);
	int size = 0, mode;
	char *sep = "";

	if (!xcp_cfg || !xcp_cfg->compatible_nps_modes)
		return sysfs_emit(buf, "Not supported\n");

	for_each_inst(mode, xcp_cfg->compatible_nps_modes) {
		size += sysfs_emit_at(buf, size, "%s%s", sep, nps_desc[mode]);
		sep = ", ";
	}

	size += sysfs_emit_at(buf, size, "\n");

	return size;
}

static ssize_t xcp_config_show(struct kobject *kobj,
			       struct kobj_attribute *attr, char *buf)
{
	struct amdgpu_xcp_cfg *xcp_cfg = to_xcp_cfg(kobj);

	return sysfs_emit(buf, "%s\n",
			  amdgpu_gfx_compute_mode_desc(xcp_cfg->mode));
}

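/*
 * Parse a partition mode name written to xcp_config (case-insensitive) and
 * refresh the resource view for that mode. This only updates what the sysfs
 * tree reports; it does not switch the hardware partition mode.
 */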
static ssize_t xcp_config_store(struct kobject *kobj,
				struct kobj_attribute *attr,
				const char *buf, size_t size)
{
	struct amdgpu_xcp_cfg *xcp_cfg = to_xcp_cfg(kobj);
	int mode, r;

	if (!strncasecmp("SPX", buf, strlen("SPX")))
		mode = AMDGPU_SPX_PARTITION_MODE;
	else if (!strncasecmp("DPX", buf, strlen("DPX")))
		mode = AMDGPU_DPX_PARTITION_MODE;
	else if (!strncasecmp("TPX", buf, strlen("TPX")))
		mode = AMDGPU_TPX_PARTITION_MODE;
	else if (!strncasecmp("QPX", buf, strlen("QPX")))
		mode = AMDGPU_QPX_PARTITION_MODE;
	else if (!strncasecmp("CPX", buf, strlen("CPX")))
		mode = AMDGPU_CPX_PARTITION_MODE;
	else
		return -EINVAL;

	r = amdgpu_xcp_get_res_info(xcp_cfg->xcp_mgr, mode, xcp_cfg);

	if (r)
		return r;

	xcp_cfg->mode = mode;
	return size;
}

static struct kobj_attribute xcp_cfg_sysfs_mode =
	__ATTR_RW_MODE(xcp_config, 0644);

static void xcp_cfg_sysfs_release(struct kobject *kobj)
{
	struct amdgpu_xcp_cfg *xcp_cfg = to_xcp_cfg(kobj);

	kfree(xcp_cfg);
}

static const struct kobj_type xcp_cfg_sysfs_ktype = {
	.release = xcp_cfg_sysfs_release,
	.sysfs_ops = &kobj_sysfs_ops,
};

static struct kobj_attribute supp_part_sysfs_mode =
	__ATTR_RO(supported_xcp_configs);

static struct kobj_attribute supp_nps_sysfs_mode =
	__ATTR_RO(supported_nps_configs);

static const struct attribute *xcp_attrs[] = {
	&supp_part_sysfs_mode.attr,
	&xcp_cfg_sysfs_mode.attr,
	NULL,
};

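/*
 * Build the compute_partition_config sysfs tree under the device: the
 * top-level attributes, an optional supported_nps_configs entry, and one
 * child kobject per resource of the current (or default SPX) mode.
 */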
void amdgpu_xcp_cfg_sysfs_init(struct amdgpu_device *adev)
{
	struct amdgpu_xcp_res_details *xcp_res;
	struct amdgpu_xcp_cfg *xcp_cfg;
	int i, r, j, rid, mode;

	if (!adev->xcp_mgr)
		return;

	xcp_cfg = kzalloc(sizeof(*xcp_cfg), GFP_KERNEL);
	if (!xcp_cfg)
		return;
	xcp_cfg->xcp_mgr = adev->xcp_mgr;

	r = kobject_init_and_add(&xcp_cfg->kobj, &xcp_cfg_sysfs_ktype,
				 &adev->dev->kobj, "compute_partition_config");
	if (r)
		goto err1;

	r = sysfs_create_files(&xcp_cfg->kobj, xcp_attrs);
	if (r)
		goto err1;

	if (adev->gmc.supported_nps_modes != 0) {
		r = sysfs_create_file(&xcp_cfg->kobj, &supp_nps_sysfs_mode.attr);
		if (r) {
			sysfs_remove_files(&xcp_cfg->kobj, xcp_attrs);
			goto err1;
		}
	}

	mode = (xcp_cfg->xcp_mgr->mode ==
		AMDGPU_UNKNOWN_COMPUTE_PARTITION_MODE) ?
		       AMDGPU_SPX_PARTITION_MODE :
		       xcp_cfg->xcp_mgr->mode;
	r = amdgpu_xcp_get_res_info(xcp_cfg->xcp_mgr, mode, xcp_cfg);
	if (r) {
		sysfs_remove_file(&xcp_cfg->kobj, &supp_nps_sysfs_mode.attr);
		sysfs_remove_files(&xcp_cfg->kobj, xcp_attrs);
		goto err1;
	}

	xcp_cfg->mode = mode;
	for (i = 0; i < xcp_cfg->num_res; i++) {
		xcp_res = &xcp_cfg->xcp_res[i];
		rid = xcp_res->id;
		r = kobject_init_and_add(&xcp_res->kobj,
					 &xcp_cfg_res_sysfs_ktype,
					 &xcp_cfg->kobj, "%s",
					 xcp_res_names[rid]);
		if (r)
			goto err;
	}

	adev->xcp_mgr->xcp_cfg = xcp_cfg;
	return;
err:
	for (j = 0; j < i; j++) {
		xcp_res = &xcp_cfg->xcp_res[j];
		kobject_put(&xcp_res->kobj);
	}

	sysfs_remove_file(&xcp_cfg->kobj, &supp_nps_sysfs_mode.attr);
	sysfs_remove_files(&xcp_cfg->kobj, xcp_attrs);
err1:
	kobject_put(&xcp_cfg->kobj);
}

void amdgpu_xcp_cfg_sysfs_fini(struct amdgpu_device *adev)
{
	struct amdgpu_xcp_res_details *xcp_res;
	struct amdgpu_xcp_cfg *xcp_cfg;
	int i;

	if (!adev->xcp_mgr)
		return;

	xcp_cfg = adev->xcp_mgr->xcp_cfg;
	for (i = 0; i < xcp_cfg->num_res; i++) {
		xcp_res = &xcp_cfg->xcp_res[i];
		kobject_put(&xcp_res->kobj);
	}

	sysfs_remove_file(&xcp_cfg->kobj, &supp_nps_sysfs_mode.attr);
	sysfs_remove_files(&xcp_cfg->kobj, xcp_attrs);
	kobject_put(&xcp_cfg->kobj);
}