1/*
2 * Copyright 2008 Advanced Micro Devices, Inc.
3 * Copyright 2008 Red Hat Inc.
4 * Copyright 2009 Jerome Glisse.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the "Software"),
8 * to deal in the Software without restriction, including without limitation
9 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10 * and/or sell copies of the Software, and to permit persons to whom the
11 * Software is furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice shall be included in
14 * all copies or substantial portions of the Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
20 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
21 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
22 * OTHER DEALINGS IN THE SOFTWARE.
23 *
24 * Authors: Dave Airlie
25 * Alex Deucher
26 * Jerome Glisse
27 */
28#include <drm/drmP.h>
29#include "amdgpu.h"
30#include <drm/amdgpu_drm.h>
31#include "amdgpu_sched.h"
32#include "amdgpu_uvd.h"
33#include "amdgpu_vce.h"
34
35#include <linux/vga_switcheroo.h>
36#include <linux/slab.h>
37#include <linux/pm_runtime.h>
38#include "amdgpu_amdkfd.h"
39
40/**
41 * amdgpu_driver_unload_kms - Main unload function for KMS.
42 *
43 * @dev: drm dev pointer
44 *
45 * This is the main unload function for KMS (all asics).
46 * The device is torn down and dev->dev_private is freed.
47 */
48void amdgpu_driver_unload_kms(struct drm_device *dev)
49{
50 struct amdgpu_device *adev = dev->dev_private;
51
52 if (adev == NULL)
53 return;
54
55 if (adev->rmmio == NULL)
56 goto done_free;
57
58 if (amdgpu_sriov_vf(adev))
59 amdgpu_virt_request_full_gpu(adev, false);
60
61 if (amdgpu_device_is_px(dev)) {
62 pm_runtime_get_sync(dev->dev);
63 pm_runtime_forbid(dev->dev);
64 }
65
66 amdgpu_acpi_fini(adev);
67
68 amdgpu_device_fini(adev);
69
70done_free:
71 kfree(adev);
72 dev->dev_private = NULL;
73}
74
75/**
76 * amdgpu_driver_load_kms - Main load function for KMS.
77 *
78 * @dev: drm dev pointer
79 * @flags: device flags
80 *
81 * This is the main load function for KMS (all asics).
82 * Returns 0 on success, error on failure.
83 */
84int amdgpu_driver_load_kms(struct drm_device *dev, unsigned long flags)
85{
86 struct amdgpu_device *adev;
87 int r, acpi_status;
88
89#ifdef CONFIG_DRM_AMDGPU_SI
90 if (!amdgpu_si_support) {
91 switch (flags & AMD_ASIC_MASK) {
92 case CHIP_TAHITI:
93 case CHIP_PITCAIRN:
94 case CHIP_VERDE:
95 case CHIP_OLAND:
96 case CHIP_HAINAN:
97 dev_info(dev->dev,
98 "SI support provided by radeon.\n");
99 dev_info(dev->dev,
100 "Use radeon.si_support=0 amdgpu.si_support=1 to override.\n"
101 );
102 return -ENODEV;
103 }
104 }
105#endif
106#ifdef CONFIG_DRM_AMDGPU_CIK
107 if (!amdgpu_cik_support) {
108 switch (flags & AMD_ASIC_MASK) {
109 case CHIP_KAVERI:
110 case CHIP_BONAIRE:
111 case CHIP_HAWAII:
112 case CHIP_KABINI:
113 case CHIP_MULLINS:
114 dev_info(dev->dev,
115 "CIK support provided by radeon.\n");
116 dev_info(dev->dev,
117 "Use radeon.cik_support=0 amdgpu.cik_support=1 to override.\n"
118 );
119 return -ENODEV;
120 }
121 }
122#endif
123
124 adev = kzalloc(sizeof(struct amdgpu_device), GFP_KERNEL);
125 if (adev == NULL) {
126 return -ENOMEM;
127 }
128 dev->dev_private = (void *)adev;
129
130 if ((amdgpu_runtime_pm != 0) &&
131 amdgpu_has_atpx() &&
132 (amdgpu_is_atpx_hybrid() ||
133 amdgpu_has_atpx_dgpu_power_cntl()) &&
134 ((flags & AMD_IS_APU) == 0) &&
135 !pci_is_thunderbolt_attached(dev->pdev))
136 flags |= AMD_IS_PX;
137
138 /* amdgpu_device_init() should report only fatal errors,
139 * such as memory allocation, iomapping or memory manager
140 * initialization failures. On success it must have properly
141 * initialized the GPU MC controller and be able to permit
142 * VRAM allocation.
143 */
144 r = amdgpu_device_init(adev, dev, dev->pdev, flags);
145 if (r) {
146 dev_err(&dev->pdev->dev, "Fatal error during GPU init\n");
147 goto out;
148 }
149
150 /* Call ACPI methods: they require modeset init,
151 * but failure here is not fatal.
152 */
153 if (!r) {
154 acpi_status = amdgpu_acpi_init(adev);
155 if (acpi_status)
156 dev_dbg(&dev->pdev->dev,
157 "Error during ACPI methods call\n");
158 }
159
160 if (amdgpu_device_is_px(dev)) {
161 pm_runtime_use_autosuspend(dev->dev);
162 pm_runtime_set_autosuspend_delay(dev->dev, 5000);
163 pm_runtime_set_active(dev->dev);
164 pm_runtime_allow(dev->dev);
165 pm_runtime_mark_last_busy(dev->dev);
166 pm_runtime_put_autosuspend(dev->dev);
167 }
168
169out:
170 if (r) {
171 /* balance pm_runtime_get_sync in amdgpu_driver_unload_kms */
172 if (adev->rmmio && amdgpu_device_is_px(dev))
173 pm_runtime_put_noidle(dev->dev);
174 amdgpu_driver_unload_kms(dev);
175 }
176
177 return r;
178}
179
180static int amdgpu_firmware_info(struct drm_amdgpu_info_firmware *fw_info,
181 struct drm_amdgpu_query_fw *query_fw,
182 struct amdgpu_device *adev)
183{
184 switch (query_fw->fw_type) {
185 case AMDGPU_INFO_FW_VCE:
186 fw_info->ver = adev->vce.fw_version;
187 fw_info->feature = adev->vce.fb_version;
188 break;
189 case AMDGPU_INFO_FW_UVD:
190 fw_info->ver = adev->uvd.fw_version;
191 fw_info->feature = 0;
192 break;
193 case AMDGPU_INFO_FW_VCN:
194 fw_info->ver = adev->vcn.fw_version;
195 fw_info->feature = 0;
196 break;
197 case AMDGPU_INFO_FW_GMC:
198 fw_info->ver = adev->gmc.fw_version;
199 fw_info->feature = 0;
200 break;
201 case AMDGPU_INFO_FW_GFX_ME:
202 fw_info->ver = adev->gfx.me_fw_version;
203 fw_info->feature = adev->gfx.me_feature_version;
204 break;
205 case AMDGPU_INFO_FW_GFX_PFP:
206 fw_info->ver = adev->gfx.pfp_fw_version;
207 fw_info->feature = adev->gfx.pfp_feature_version;
208 break;
209 case AMDGPU_INFO_FW_GFX_CE:
210 fw_info->ver = adev->gfx.ce_fw_version;
211 fw_info->feature = adev->gfx.ce_feature_version;
212 break;
213 case AMDGPU_INFO_FW_GFX_RLC:
214 fw_info->ver = adev->gfx.rlc_fw_version;
215 fw_info->feature = adev->gfx.rlc_feature_version;
216 break;
217 case AMDGPU_INFO_FW_GFX_MEC:
218 if (query_fw->index == 0) {
219 fw_info->ver = adev->gfx.mec_fw_version;
220 fw_info->feature = adev->gfx.mec_feature_version;
221 } else if (query_fw->index == 1) {
222 fw_info->ver = adev->gfx.mec2_fw_version;
223 fw_info->feature = adev->gfx.mec2_feature_version;
224 } else
225 return -EINVAL;
226 break;
227 case AMDGPU_INFO_FW_SMC:
228 fw_info->ver = adev->pm.fw_version;
229 fw_info->feature = 0;
230 break;
231 case AMDGPU_INFO_FW_SDMA:
232 if (query_fw->index >= adev->sdma.num_instances)
233 return -EINVAL;
234 fw_info->ver = adev->sdma.instance[query_fw->index].fw_version;
235 fw_info->feature = adev->sdma.instance[query_fw->index].feature_version;
236 break;
237 case AMDGPU_INFO_FW_SOS:
238 fw_info->ver = adev->psp.sos_fw_version;
239 fw_info->feature = adev->psp.sos_feature_version;
240 break;
241 case AMDGPU_INFO_FW_ASD:
242 fw_info->ver = adev->psp.asd_fw_version;
243 fw_info->feature = adev->psp.asd_feature_version;
244 break;
245 default:
246 return -EINVAL;
247 }
248 return 0;
249}
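/*
 * A query is addressed by the fw_type and, for multi-instance blocks,
 * the index field of struct drm_amdgpu_query_fw.  As a minimal,
 * illustrative sketch (not taken from the driver itself), a caller
 * asking for the second compute microcode (MEC2) would fill in roughly:
 *
 *	struct drm_amdgpu_query_fw q = {
 *		.fw_type     = AMDGPU_INFO_FW_GFX_MEC,
 *		.ip_instance = 0,	// only instance 0 is accepted
 *		.index       = 1,	// 0 selects MEC, 1 selects MEC2
 *	};
 *
 * The version/feature pair then comes back through
 * struct drm_amdgpu_info_firmware (ver, feature).
 */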
250
251/*
252 * Userspace get information ioctl
253 */
254/**
255 * amdgpu_info_ioctl - answer a device specific request.
256 *
257 * @dev: drm device pointer
258 * @data: request object
259 * @filp: drm filp
260 *
261 * This function is used to pass device-specific parameters to userspace
262 * drivers. Examples include: PCI device id, pipeline params, tiling params,
263 * etc. (all asics).
264 * Returns 0 on success, -EINVAL on failure.
265 */
266static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
267{
268 struct amdgpu_device *adev = dev->dev_private;
269 struct drm_amdgpu_info *info = data;
270 struct amdgpu_mode_info *minfo = &adev->mode_info;
271 void __user *out = (void __user *)(uintptr_t)info->return_pointer;
272 uint32_t size = info->return_size;
273 struct drm_crtc *crtc;
274 uint32_t ui32 = 0;
275 uint64_t ui64 = 0;
276 int i, found;
277 int ui32_size = sizeof(ui32);
278
279 if (!info->return_size || !info->return_pointer)
280 return -EINVAL;
281
282 switch (info->query) {
283 case AMDGPU_INFO_ACCEL_WORKING:
284 ui32 = adev->accel_working;
285 return copy_to_user(out, &ui32, min(size, 4u)) ? -EFAULT : 0;
286 case AMDGPU_INFO_CRTC_FROM_ID:
287 for (i = 0, found = 0; i < adev->mode_info.num_crtc; i++) {
288 crtc = (struct drm_crtc *)minfo->crtcs[i];
289 if (crtc && crtc->base.id == info->mode_crtc.id) {
290 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
291 ui32 = amdgpu_crtc->crtc_id;
292 found = 1;
293 break;
294 }
295 }
296 if (!found) {
297 DRM_DEBUG_KMS("unknown crtc id %d\n", info->mode_crtc.id);
298 return -EINVAL;
299 }
300 return copy_to_user(out, &ui32, min(size, 4u)) ? -EFAULT : 0;
301 case AMDGPU_INFO_HW_IP_INFO: {
302 struct drm_amdgpu_info_hw_ip ip = {};
303 enum amd_ip_block_type type;
304 uint32_t ring_mask = 0;
305 uint32_t ib_start_alignment = 0;
306 uint32_t ib_size_alignment = 0;
307
308 if (info->query_hw_ip.ip_instance >= AMDGPU_HW_IP_INSTANCE_MAX_COUNT)
309 return -EINVAL;
310
311 switch (info->query_hw_ip.type) {
312 case AMDGPU_HW_IP_GFX:
313 type = AMD_IP_BLOCK_TYPE_GFX;
314 for (i = 0; i < adev->gfx.num_gfx_rings; i++)
315 ring_mask |= ((adev->gfx.gfx_ring[i].ready ? 1 : 0) << i);
316 ib_start_alignment = AMDGPU_GPU_PAGE_SIZE;
317 ib_size_alignment = 8;
318 break;
319 case AMDGPU_HW_IP_COMPUTE:
320 type = AMD_IP_BLOCK_TYPE_GFX;
321 for (i = 0; i < adev->gfx.num_compute_rings; i++)
322 ring_mask |= ((adev->gfx.compute_ring[i].ready ? 1 : 0) << i);
323 ib_start_alignment = AMDGPU_GPU_PAGE_SIZE;
324 ib_size_alignment = 8;
325 break;
326 case AMDGPU_HW_IP_DMA:
327 type = AMD_IP_BLOCK_TYPE_SDMA;
328 for (i = 0; i < adev->sdma.num_instances; i++)
329 ring_mask |= ((adev->sdma.instance[i].ring.ready ? 1 : 0) << i);
330 ib_start_alignment = AMDGPU_GPU_PAGE_SIZE;
331 ib_size_alignment = 1;
332 break;
333 case AMDGPU_HW_IP_UVD:
334 type = AMD_IP_BLOCK_TYPE_UVD;
335 ring_mask = adev->uvd.ring.ready ? 1 : 0;
336 ib_start_alignment = AMDGPU_GPU_PAGE_SIZE;
337 ib_size_alignment = 16;
338 break;
339 case AMDGPU_HW_IP_VCE:
340 type = AMD_IP_BLOCK_TYPE_VCE;
341 for (i = 0; i < adev->vce.num_rings; i++)
342 ring_mask |= ((adev->vce.ring[i].ready ? 1 : 0) << i);
343 ib_start_alignment = AMDGPU_GPU_PAGE_SIZE;
344 ib_size_alignment = 1;
345 break;
346 case AMDGPU_HW_IP_UVD_ENC:
347 type = AMD_IP_BLOCK_TYPE_UVD;
348 for (i = 0; i < adev->uvd.num_enc_rings; i++)
349 ring_mask |= ((adev->uvd.ring_enc[i].ready ? 1 : 0) << i);
350 ib_start_alignment = AMDGPU_GPU_PAGE_SIZE;
351 ib_size_alignment = 1;
352 break;
353 case AMDGPU_HW_IP_VCN_DEC:
354 type = AMD_IP_BLOCK_TYPE_VCN;
355 ring_mask = adev->vcn.ring_dec.ready ? 1 : 0;
356 ib_start_alignment = AMDGPU_GPU_PAGE_SIZE;
357 ib_size_alignment = 16;
358 break;
359 case AMDGPU_HW_IP_VCN_ENC:
360 type = AMD_IP_BLOCK_TYPE_VCN;
361 for (i = 0; i < adev->vcn.num_enc_rings; i++)
362 ring_mask |= ((adev->vcn.ring_enc[i].ready ? 1 : 0) << i);
363 ib_start_alignment = AMDGPU_GPU_PAGE_SIZE;
364 ib_size_alignment = 1;
365 break;
366 default:
367 return -EINVAL;
368 }
369
370 for (i = 0; i < adev->num_ip_blocks; i++) {
371 if (adev->ip_blocks[i].version->type == type &&
372 adev->ip_blocks[i].status.valid) {
373 ip.hw_ip_version_major = adev->ip_blocks[i].version->major;
374 ip.hw_ip_version_minor = adev->ip_blocks[i].version->minor;
375 ip.capabilities_flags = 0;
376 ip.available_rings = ring_mask;
377 ip.ib_start_alignment = ib_start_alignment;
378 ip.ib_size_alignment = ib_size_alignment;
379 break;
380 }
381 }
382 return copy_to_user(out, &ip,
383 min((size_t)size, sizeof(ip))) ? -EFAULT : 0;
384 }
385 case AMDGPU_INFO_HW_IP_COUNT: {
386 enum amd_ip_block_type type;
387 uint32_t count = 0;
388
389 switch (info->query_hw_ip.type) {
390 case AMDGPU_HW_IP_GFX:
391 type = AMD_IP_BLOCK_TYPE_GFX;
392 break;
393 case AMDGPU_HW_IP_COMPUTE:
394 type = AMD_IP_BLOCK_TYPE_GFX;
395 break;
396 case AMDGPU_HW_IP_DMA:
397 type = AMD_IP_BLOCK_TYPE_SDMA;
398 break;
399 case AMDGPU_HW_IP_UVD:
400 type = AMD_IP_BLOCK_TYPE_UVD;
401 break;
402 case AMDGPU_HW_IP_VCE:
403 type = AMD_IP_BLOCK_TYPE_VCE;
404 break;
405 case AMDGPU_HW_IP_UVD_ENC:
406 type = AMD_IP_BLOCK_TYPE_UVD;
407 break;
408 case AMDGPU_HW_IP_VCN_DEC:
409 case AMDGPU_HW_IP_VCN_ENC:
410 type = AMD_IP_BLOCK_TYPE_VCN;
411 break;
412 default:
413 return -EINVAL;
414 }
415
416 for (i = 0; i < adev->num_ip_blocks; i++)
417 if (adev->ip_blocks[i].version->type == type &&
418 adev->ip_blocks[i].status.valid &&
419 count < AMDGPU_HW_IP_INSTANCE_MAX_COUNT)
420 count++;
421
422 return copy_to_user(out, &count, min(size, 4u)) ? -EFAULT : 0;
423 }
424 case AMDGPU_INFO_TIMESTAMP:
425 ui64 = amdgpu_gfx_get_gpu_clock_counter(adev);
426 return copy_to_user(out, &ui64, min(size, 8u)) ? -EFAULT : 0;
427 case AMDGPU_INFO_FW_VERSION: {
428 struct drm_amdgpu_info_firmware fw_info;
429 int ret;
430
431 /* We only support one instance of each IP block right now. */
432 if (info->query_fw.ip_instance != 0)
433 return -EINVAL;
434
435 ret = amdgpu_firmware_info(&fw_info, &info->query_fw, adev);
436 if (ret)
437 return ret;
438
439 return copy_to_user(out, &fw_info,
440 min((size_t)size, sizeof(fw_info))) ? -EFAULT : 0;
441 }
442 case AMDGPU_INFO_NUM_BYTES_MOVED:
443 ui64 = atomic64_read(&adev->num_bytes_moved);
444 return copy_to_user(out, &ui64, min(size, 8u)) ? -EFAULT : 0;
445 case AMDGPU_INFO_NUM_EVICTIONS:
446 ui64 = atomic64_read(&adev->num_evictions);
447 return copy_to_user(out, &ui64, min(size, 8u)) ? -EFAULT : 0;
448 case AMDGPU_INFO_NUM_VRAM_CPU_PAGE_FAULTS:
449 ui64 = atomic64_read(&adev->num_vram_cpu_page_faults);
450 return copy_to_user(out, &ui64, min(size, 8u)) ? -EFAULT : 0;
451 case AMDGPU_INFO_VRAM_USAGE:
452 ui64 = amdgpu_vram_mgr_usage(&adev->mman.bdev.man[TTM_PL_VRAM]);
453 return copy_to_user(out, &ui64, min(size, 8u)) ? -EFAULT : 0;
454 case AMDGPU_INFO_VIS_VRAM_USAGE:
455 ui64 = amdgpu_vram_mgr_vis_usage(&adev->mman.bdev.man[TTM_PL_VRAM]);
456 return copy_to_user(out, &ui64, min(size, 8u)) ? -EFAULT : 0;
457 case AMDGPU_INFO_GTT_USAGE:
458 ui64 = amdgpu_gtt_mgr_usage(&adev->mman.bdev.man[TTM_PL_TT]);
459 return copy_to_user(out, &ui64, min(size, 8u)) ? -EFAULT : 0;
460 case AMDGPU_INFO_GDS_CONFIG: {
461 struct drm_amdgpu_info_gds gds_info;
462
463 memset(&gds_info, 0, sizeof(gds_info));
464 gds_info.gds_gfx_partition_size = adev->gds.mem.gfx_partition_size >> AMDGPU_GDS_SHIFT;
465 gds_info.compute_partition_size = adev->gds.mem.cs_partition_size >> AMDGPU_GDS_SHIFT;
466 gds_info.gds_total_size = adev->gds.mem.total_size >> AMDGPU_GDS_SHIFT;
467 gds_info.gws_per_gfx_partition = adev->gds.gws.gfx_partition_size >> AMDGPU_GWS_SHIFT;
468 gds_info.gws_per_compute_partition = adev->gds.gws.cs_partition_size >> AMDGPU_GWS_SHIFT;
469 gds_info.oa_per_gfx_partition = adev->gds.oa.gfx_partition_size >> AMDGPU_OA_SHIFT;
470 gds_info.oa_per_compute_partition = adev->gds.oa.cs_partition_size >> AMDGPU_OA_SHIFT;
471 return copy_to_user(out, &gds_info,
472 min((size_t)size, sizeof(gds_info))) ? -EFAULT : 0;
473 }
474 case AMDGPU_INFO_VRAM_GTT: {
475 struct drm_amdgpu_info_vram_gtt vram_gtt;
476
477 vram_gtt.vram_size = adev->gmc.real_vram_size;
478 vram_gtt.vram_size -= adev->vram_pin_size;
479 vram_gtt.vram_cpu_accessible_size = adev->gmc.visible_vram_size;
480 vram_gtt.vram_cpu_accessible_size -= (adev->vram_pin_size - adev->invisible_pin_size);
481 vram_gtt.gtt_size = adev->mman.bdev.man[TTM_PL_TT].size;
482 vram_gtt.gtt_size *= PAGE_SIZE;
483 vram_gtt.gtt_size -= adev->gart_pin_size;
484 return copy_to_user(out, &vram_gtt,
485 min((size_t)size, sizeof(vram_gtt))) ? -EFAULT : 0;
486 }
487 case AMDGPU_INFO_MEMORY: {
488 struct drm_amdgpu_memory_info mem;
489
490 memset(&mem, 0, sizeof(mem));
491 mem.vram.total_heap_size = adev->gmc.real_vram_size;
492 mem.vram.usable_heap_size =
493 adev->gmc.real_vram_size - adev->vram_pin_size;
494 mem.vram.heap_usage =
495 amdgpu_vram_mgr_usage(&adev->mman.bdev.man[TTM_PL_VRAM]);
496 mem.vram.max_allocation = mem.vram.usable_heap_size * 3 / 4;
497
498 mem.cpu_accessible_vram.total_heap_size =
499 adev->gmc.visible_vram_size;
500 mem.cpu_accessible_vram.usable_heap_size =
501 adev->gmc.visible_vram_size -
502 (adev->vram_pin_size - adev->invisible_pin_size);
503 mem.cpu_accessible_vram.heap_usage =
504 amdgpu_vram_mgr_vis_usage(&adev->mman.bdev.man[TTM_PL_VRAM]);
505 mem.cpu_accessible_vram.max_allocation =
506 mem.cpu_accessible_vram.usable_heap_size * 3 / 4;
507
508 mem.gtt.total_heap_size = adev->mman.bdev.man[TTM_PL_TT].size;
509 mem.gtt.total_heap_size *= PAGE_SIZE;
510 mem.gtt.usable_heap_size = mem.gtt.total_heap_size
511 - adev->gart_pin_size;
512 mem.gtt.heap_usage =
513 amdgpu_gtt_mgr_usage(&adev->mman.bdev.man[TTM_PL_TT]);
514 mem.gtt.max_allocation = mem.gtt.usable_heap_size * 3 / 4;
515
516 return copy_to_user(out, &mem,
517 min((size_t)size, sizeof(mem)))
518 ? -EFAULT : 0;
519 }
520 case AMDGPU_INFO_READ_MMR_REG: {
521 unsigned n, alloc_size;
522 uint32_t *regs;
523 unsigned se_num = (info->read_mmr_reg.instance >>
524 AMDGPU_INFO_MMR_SE_INDEX_SHIFT) &
525 AMDGPU_INFO_MMR_SE_INDEX_MASK;
526 unsigned sh_num = (info->read_mmr_reg.instance >>
527 AMDGPU_INFO_MMR_SH_INDEX_SHIFT) &
528 AMDGPU_INFO_MMR_SH_INDEX_MASK;
529
530 /* set full masks if userspace set all bits
531 * in the bitfields */
532 if (se_num == AMDGPU_INFO_MMR_SE_INDEX_MASK)
533 se_num = 0xffffffff;
534 if (sh_num == AMDGPU_INFO_MMR_SH_INDEX_MASK)
535 sh_num = 0xffffffff;
536
537 regs = kmalloc_array(info->read_mmr_reg.count, sizeof(*regs), GFP_KERNEL);
538 if (!regs)
539 return -ENOMEM;
540 alloc_size = info->read_mmr_reg.count * sizeof(*regs);
541
542 for (i = 0; i < info->read_mmr_reg.count; i++)
543 if (amdgpu_asic_read_register(adev, se_num, sh_num,
544 info->read_mmr_reg.dword_offset + i,
545 &regs[i])) {
546 DRM_DEBUG_KMS("unallowed offset %#x\n",
547 info->read_mmr_reg.dword_offset + i);
548 kfree(regs);
549 return -EFAULT;
550 }
551 n = copy_to_user(out, regs, min(size, alloc_size));
552 kfree(regs);
553 return n ? -EFAULT : 0;
554 }
555 case AMDGPU_INFO_DEV_INFO: {
556 struct drm_amdgpu_info_device dev_info = {};
557 uint64_t vm_size;
558
559 dev_info.device_id = dev->pdev->device;
560 dev_info.chip_rev = adev->rev_id;
561 dev_info.external_rev = adev->external_rev_id;
562 dev_info.pci_rev = dev->pdev->revision;
563 dev_info.family = adev->family;
564 dev_info.num_shader_engines = adev->gfx.config.max_shader_engines;
565 dev_info.num_shader_arrays_per_engine = adev->gfx.config.max_sh_per_se;
566 /* return all clocks in KHz */
567 dev_info.gpu_counter_freq = amdgpu_asic_get_xclk(adev) * 10;
568 if (adev->pm.dpm_enabled) {
569 dev_info.max_engine_clock = amdgpu_dpm_get_sclk(adev, false) * 10;
570 dev_info.max_memory_clock = amdgpu_dpm_get_mclk(adev, false) * 10;
571 } else {
572 dev_info.max_engine_clock = adev->clock.default_sclk * 10;
573 dev_info.max_memory_clock = adev->clock.default_mclk * 10;
574 }
575 dev_info.enabled_rb_pipes_mask = adev->gfx.config.backend_enable_mask;
576 dev_info.num_rb_pipes = adev->gfx.config.max_backends_per_se *
577 adev->gfx.config.max_shader_engines;
578 dev_info.num_hw_gfx_contexts = adev->gfx.config.max_hw_contexts;
579 dev_info._pad = 0;
580 dev_info.ids_flags = 0;
581 if (adev->flags & AMD_IS_APU)
582 dev_info.ids_flags |= AMDGPU_IDS_FLAGS_FUSION;
583 if (amdgpu_sriov_vf(adev))
584 dev_info.ids_flags |= AMDGPU_IDS_FLAGS_PREEMPTION;
585
586 vm_size = adev->vm_manager.max_pfn * AMDGPU_GPU_PAGE_SIZE;
587 vm_size -= AMDGPU_VA_RESERVED_SIZE;
588
589 /* Older VCE FW versions are buggy and can handle only 40 bits */
590 if (adev->vce.fw_version < AMDGPU_VCE_FW_53_45)
591 vm_size = min(vm_size, 1ULL << 40);
592
593 dev_info.virtual_address_offset = AMDGPU_VA_RESERVED_SIZE;
594 dev_info.virtual_address_max =
595 min(vm_size, AMDGPU_VA_HOLE_START);
596
597 if (vm_size > AMDGPU_VA_HOLE_START) {
598 dev_info.high_va_offset = AMDGPU_VA_HOLE_END;
599 dev_info.high_va_max = AMDGPU_VA_HOLE_END | vm_size;
600 }
601 dev_info.virtual_address_alignment = max((int)PAGE_SIZE, AMDGPU_GPU_PAGE_SIZE);
602 dev_info.pte_fragment_size = (1 << adev->vm_manager.fragment_size) * AMDGPU_GPU_PAGE_SIZE;
603 dev_info.gart_page_size = AMDGPU_GPU_PAGE_SIZE;
604 dev_info.cu_active_number = adev->gfx.cu_info.number;
605 dev_info.cu_ao_mask = adev->gfx.cu_info.ao_cu_mask;
606 dev_info.ce_ram_size = adev->gfx.ce_ram_size;
607 memcpy(&dev_info.cu_ao_bitmap[0], &adev->gfx.cu_info.ao_cu_bitmap[0],
608 sizeof(adev->gfx.cu_info.ao_cu_bitmap));
609 memcpy(&dev_info.cu_bitmap[0], &adev->gfx.cu_info.bitmap[0],
610 sizeof(adev->gfx.cu_info.bitmap));
611 dev_info.vram_type = adev->gmc.vram_type;
612 dev_info.vram_bit_width = adev->gmc.vram_width;
613 dev_info.vce_harvest_config = adev->vce.harvest_config;
614 dev_info.gc_double_offchip_lds_buf =
615 adev->gfx.config.double_offchip_lds_buf;
616
617 if (amdgpu_ngg) {
618 dev_info.prim_buf_gpu_addr = adev->gfx.ngg.buf[NGG_PRIM].gpu_addr;
619 dev_info.prim_buf_size = adev->gfx.ngg.buf[NGG_PRIM].size;
620 dev_info.pos_buf_gpu_addr = adev->gfx.ngg.buf[NGG_POS].gpu_addr;
621 dev_info.pos_buf_size = adev->gfx.ngg.buf[NGG_POS].size;
622 dev_info.cntl_sb_buf_gpu_addr = adev->gfx.ngg.buf[NGG_CNTL].gpu_addr;
623 dev_info.cntl_sb_buf_size = adev->gfx.ngg.buf[NGG_CNTL].size;
624 dev_info.param_buf_gpu_addr = adev->gfx.ngg.buf[NGG_PARAM].gpu_addr;
625 dev_info.param_buf_size = adev->gfx.ngg.buf[NGG_PARAM].size;
626 }
627 dev_info.wave_front_size = adev->gfx.cu_info.wave_front_size;
628 dev_info.num_shader_visible_vgprs = adev->gfx.config.max_gprs;
629 dev_info.num_cu_per_sh = adev->gfx.config.max_cu_per_sh;
630 dev_info.num_tcc_blocks = adev->gfx.config.max_texture_channel_caches;
631 dev_info.gs_vgt_table_depth = adev->gfx.config.gs_vgt_table_depth;
632 dev_info.gs_prim_buffer_depth = adev->gfx.config.gs_prim_buffer_depth;
633 dev_info.max_gs_waves_per_vgt = adev->gfx.config.max_gs_threads;
634
635 return copy_to_user(out, &dev_info,
636 min((size_t)size, sizeof(dev_info))) ? -EFAULT : 0;
637 }
638 case AMDGPU_INFO_VCE_CLOCK_TABLE: {
639 unsigned i;
640 struct drm_amdgpu_info_vce_clock_table vce_clk_table = {};
641 struct amd_vce_state *vce_state;
642
643 for (i = 0; i < AMDGPU_VCE_CLOCK_TABLE_ENTRIES; i++) {
644 vce_state = amdgpu_dpm_get_vce_clock_state(adev, i);
645 if (vce_state) {
646 vce_clk_table.entries[i].sclk = vce_state->sclk;
647 vce_clk_table.entries[i].mclk = vce_state->mclk;
648 vce_clk_table.entries[i].eclk = vce_state->evclk;
649 vce_clk_table.num_valid_entries++;
650 }
651 }
652
653 return copy_to_user(out, &vce_clk_table,
654 min((size_t)size, sizeof(vce_clk_table))) ? -EFAULT : 0;
655 }
656 case AMDGPU_INFO_VBIOS: {
657 uint32_t bios_size = adev->bios_size;
658
659 switch (info->vbios_info.type) {
660 case AMDGPU_INFO_VBIOS_SIZE:
661 return copy_to_user(out, &bios_size,
662 min((size_t)size, sizeof(bios_size)))
663 ? -EFAULT : 0;
664 case AMDGPU_INFO_VBIOS_IMAGE: {
665 uint8_t *bios;
666 uint32_t bios_offset = info->vbios_info.offset;
667
668 if (bios_offset >= bios_size)
669 return -EINVAL;
670
671 bios = adev->bios + bios_offset;
672 return copy_to_user(out, bios,
673 min((size_t)size, (size_t)(bios_size - bios_offset)))
674 ? -EFAULT : 0;
675 }
676 default:
677 DRM_DEBUG_KMS("Invalid request %d\n",
678 info->vbios_info.type);
679 return -EINVAL;
680 }
681 }
682 case AMDGPU_INFO_NUM_HANDLES: {
683 struct drm_amdgpu_info_num_handles handle;
684
685 switch (info->query_hw_ip.type) {
686 case AMDGPU_HW_IP_UVD:
687 /* Starting with Polaris, we support unlimited UVD handles */
688 if (adev->asic_type < CHIP_POLARIS10) {
689 handle.uvd_max_handles = adev->uvd.max_handles;
690 handle.uvd_used_handles = amdgpu_uvd_used_handles(adev);
691
692 return copy_to_user(out, &handle,
693 min((size_t)size, sizeof(handle))) ? -EFAULT : 0;
694 } else {
695 return -ENODATA;
696 }
697
698 break;
699 default:
700 return -EINVAL;
701 }
702 }
703 case AMDGPU_INFO_SENSOR: {
704 struct pp_gpu_power query = {0};
705 int query_size = sizeof(query);
706
707 if (amdgpu_dpm == 0)
708 return -ENOENT;
709
710 switch (info->sensor_info.type) {
711 case AMDGPU_INFO_SENSOR_GFX_SCLK:
712 /* get sclk in MHz */
713 if (amdgpu_dpm_read_sensor(adev,
714 AMDGPU_PP_SENSOR_GFX_SCLK,
715 (void *)&ui32, &ui32_size)) {
716 return -EINVAL;
717 }
718 ui32 /= 100;
719 break;
720 case AMDGPU_INFO_SENSOR_GFX_MCLK:
721 /* get mclk in MHz */
722 if (amdgpu_dpm_read_sensor(adev,
723 AMDGPU_PP_SENSOR_GFX_MCLK,
724 (void *)&ui32, &ui32_size)) {
725 return -EINVAL;
726 }
727 ui32 /= 100;
728 break;
729 case AMDGPU_INFO_SENSOR_GPU_TEMP:
730 /* get temperature in millidegrees C */
731 if (amdgpu_dpm_read_sensor(adev,
732 AMDGPU_PP_SENSOR_GPU_TEMP,
733 (void *)&ui32, &ui32_size)) {
734 return -EINVAL;
735 }
736 break;
737 case AMDGPU_INFO_SENSOR_GPU_LOAD:
738 /* get GPU load */
739 if (amdgpu_dpm_read_sensor(adev,
740 AMDGPU_PP_SENSOR_GPU_LOAD,
741 (void *)&ui32, &ui32_size)) {
742 return -EINVAL;
743 }
744 break;
745 case AMDGPU_INFO_SENSOR_GPU_AVG_POWER:
746 /* get average GPU power */
747 if (amdgpu_dpm_read_sensor(adev,
748 AMDGPU_PP_SENSOR_GPU_POWER,
749 (void *)&query, &query_size)) {
750 return -EINVAL;
751 }
752 ui32 = query.average_gpu_power >> 8;
753 break;
754 case AMDGPU_INFO_SENSOR_VDDNB:
755 /* get VDDNB in millivolts */
756 if (amdgpu_dpm_read_sensor(adev,
757 AMDGPU_PP_SENSOR_VDDNB,
758 (void *)&ui32, &ui32_size)) {
759 return -EINVAL;
760 }
761 break;
762 case AMDGPU_INFO_SENSOR_VDDGFX:
763 /* get VDDGFX in millivolts */
764 if (amdgpu_dpm_read_sensor(adev,
765 AMDGPU_PP_SENSOR_VDDGFX,
766 (void *)&ui32, &ui32_size)) {
767 return -EINVAL;
768 }
769 break;
770 case AMDGPU_INFO_SENSOR_STABLE_PSTATE_GFX_SCLK:
771 /* get stable pstate sclk in MHz */
772 if (amdgpu_dpm_read_sensor(adev,
773 AMDGPU_PP_SENSOR_STABLE_PSTATE_SCLK,
774 (void *)&ui32, &ui32_size)) {
775 return -EINVAL;
776 }
777 ui32 /= 100;
778 break;
779 case AMDGPU_INFO_SENSOR_STABLE_PSTATE_GFX_MCLK:
780 /* get stable pstate mclk in MHz */
781 if (amdgpu_dpm_read_sensor(adev,
782 AMDGPU_PP_SENSOR_STABLE_PSTATE_MCLK,
783 (void *)&ui32, &ui32_size)) {
784 return -EINVAL;
785 }
786 ui32 /= 100;
787 break;
788 default:
789 DRM_DEBUG_KMS("Invalid request %d\n",
790 info->sensor_info.type);
791 return -EINVAL;
792 }
793 return copy_to_user(out, &ui32, min(size, 4u)) ? -EFAULT : 0;
794 }
795 case AMDGPU_INFO_VRAM_LOST_COUNTER:
796 ui32 = atomic_read(&adev->vram_lost_counter);
797 return copy_to_user(out, &ui32, min(size, 4u)) ? -EFAULT : 0;
798 default:
799 DRM_DEBUG_KMS("Invalid request %d\n", info->query);
800 return -EINVAL;
801 }
802 return 0;
803}
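/*
 * Minimal userspace sketch of exercising this ioctl (illustrative only,
 * assuming an fd opened on a render node such as /dev/dri/renderD128 and
 * the uapi header <drm/amdgpu_drm.h>; real clients normally go through
 * libdrm's amdgpu_query_* helpers instead):
 *
 *	struct drm_amdgpu_info_firmware fw = {};
 *	struct drm_amdgpu_info request = {};
 *
 *	request.return_pointer   = (uintptr_t)&fw;
 *	request.return_size      = sizeof(fw);
 *	request.query            = AMDGPU_INFO_FW_VERSION;
 *	request.query_fw.fw_type = AMDGPU_INFO_FW_GFX_ME;
 *
 *	if (ioctl(fd, DRM_IOCTL_AMDGPU_INFO, &request) == 0)
 *		printf("ME fw 0x%08x, feature %u\n", fw.ver, fw.feature);
 */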
804
805
806/*
807 * Outdated mess for old drm with Xorg being in charge (void function now).
808 */
809/**
810 * amdgpu_driver_lastclose_kms - drm callback for last close
811 *
812 * @dev: drm dev pointer
813 *
814 * Switch vga_switcheroo state after last close (all asics).
815 */
816void amdgpu_driver_lastclose_kms(struct drm_device *dev)
817{
818 drm_fb_helper_lastclose(dev);
819 vga_switcheroo_process_delayed_switch();
820}
821
822/**
823 * amdgpu_driver_open_kms - drm callback for open
824 *
825 * @dev: drm dev pointer
826 * @file_priv: drm file
827 *
828 * On device open, init vm on cayman+ (all asics).
829 * Returns 0 on success, error on failure.
830 */
831int amdgpu_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv)
832{
833 struct amdgpu_device *adev = dev->dev_private;
834 struct amdgpu_fpriv *fpriv;
835 int r, pasid;
836
837 file_priv->driver_priv = NULL;
838
839 r = pm_runtime_get_sync(dev->dev);
840 if (r < 0)
841 return r;
842
843 fpriv = kzalloc(sizeof(*fpriv), GFP_KERNEL);
844 if (unlikely(!fpriv)) {
845 r = -ENOMEM;
846 goto out_suspend;
847 }
848
849 pasid = amdgpu_pasid_alloc(16);
850 if (pasid < 0) {
851 dev_warn(adev->dev, "No more PASIDs available!");
852 pasid = 0;
853 }
854 r = amdgpu_vm_init(adev, &fpriv->vm, AMDGPU_VM_CONTEXT_GFX, pasid);
855 if (r)
856 goto error_pasid;
857
858 fpriv->prt_va = amdgpu_vm_bo_add(adev, &fpriv->vm, NULL);
859 if (!fpriv->prt_va) {
860 r = -ENOMEM;
861 goto error_vm;
862 }
863
864 if (amdgpu_sriov_vf(adev)) {
865 r = amdgpu_map_static_csa(adev, &fpriv->vm, &fpriv->csa_va);
866 if (r)
867 goto error_vm;
868 }
869
870 mutex_init(&fpriv->bo_list_lock);
871 idr_init(&fpriv->bo_list_handles);
872
873 amdgpu_ctx_mgr_init(&fpriv->ctx_mgr);
874
875 file_priv->driver_priv = fpriv;
876 goto out_suspend;
877
878error_vm:
879 amdgpu_vm_fini(adev, &fpriv->vm);
880
881error_pasid:
882 if (pasid)
883 amdgpu_pasid_free(pasid);
884
885 kfree(fpriv);
886
887out_suspend:
888 pm_runtime_mark_last_busy(dev->dev);
889 pm_runtime_put_autosuspend(dev->dev);
890
891 return r;
892}
893
894/**
895 * amdgpu_driver_postclose_kms - drm callback for post close
896 *
897 * @dev: drm dev pointer
898 * @file_priv: drm file
899 *
900 * On device post close, tear down vm on cayman+ (all asics).
901 */
902void amdgpu_driver_postclose_kms(struct drm_device *dev,
903 struct drm_file *file_priv)
904{
905 struct amdgpu_device *adev = dev->dev_private;
906 struct amdgpu_fpriv *fpriv = file_priv->driver_priv;
907 struct amdgpu_bo_list *list;
908 struct amdgpu_bo *pd;
909 unsigned int pasid;
910 int handle;
911
912 if (!fpriv)
913 return;
914
915 pm_runtime_get_sync(dev->dev);
916
917 amdgpu_ctx_mgr_fini(&fpriv->ctx_mgr);
918
919 if (adev->asic_type != CHIP_RAVEN) {
920 amdgpu_uvd_free_handles(adev, file_priv);
921 amdgpu_vce_free_handles(adev, file_priv);
922 }
923
924 amdgpu_vm_bo_rmv(adev, fpriv->prt_va);
925
926 if (amdgpu_sriov_vf(adev)) {
927 /* TODO: how to handle reserve failure */
928 BUG_ON(amdgpu_bo_reserve(adev->virt.csa_obj, true));
929 amdgpu_vm_bo_rmv(adev, fpriv->csa_va);
930 fpriv->csa_va = NULL;
931 amdgpu_bo_unreserve(adev->virt.csa_obj);
932 }
933
934 pasid = fpriv->vm.pasid;
935 pd = amdgpu_bo_ref(fpriv->vm.root.base.bo);
936
937 amdgpu_vm_fini(adev, &fpriv->vm);
938 if (pasid)
939 amdgpu_pasid_free_delayed(pd->tbo.resv, pasid);
940 amdgpu_bo_unref(&pd);
941
942 idr_for_each_entry(&fpriv->bo_list_handles, list, handle)
943 amdgpu_bo_list_free(list);
944
945 idr_destroy(&fpriv->bo_list_handles);
946 mutex_destroy(&fpriv->bo_list_lock);
947
948 kfree(fpriv);
949 file_priv->driver_priv = NULL;
950
951 pm_runtime_mark_last_busy(dev->dev);
952 pm_runtime_put_autosuspend(dev->dev);
953}
954
955/*
956 * VBlank related functions.
957 */
958/**
959 * amdgpu_get_vblank_counter_kms - get frame count
960 *
961 * @dev: drm dev pointer
962 * @pipe: crtc to get the frame count from
963 *
964 * Gets the frame count on the requested crtc (all asics).
965 * Returns frame count on success, -EINVAL on failure.
966 */
967u32 amdgpu_get_vblank_counter_kms(struct drm_device *dev, unsigned int pipe)
968{
969 struct amdgpu_device *adev = dev->dev_private;
970 int vpos, hpos, stat;
971 u32 count;
972
973 if (pipe >= adev->mode_info.num_crtc) {
974 DRM_ERROR("Invalid crtc %u\n", pipe);
975 return -EINVAL;
976 }
977
978 /* The hw increments its frame counter at start of vsync, not at start
979 * of vblank, as is required by DRM core vblank counter handling.
980 * Cook the hw count here to make it appear to the caller as if it
981 * incremented at start of vblank. We measure distance to start of
982 * vblank in vpos. vpos therefore will be >= 0 between start of vblank
983 * and start of vsync, so vpos >= 0 means to bump the hw frame counter
984 * result by 1 to give the proper appearance to caller.
985 */
986 if (adev->mode_info.crtcs[pipe]) {
987 /* Repeat readout if needed to provide stable result if
988 * we cross start of vsync during the queries.
989 */
990 do {
991 count = amdgpu_display_vblank_get_counter(adev, pipe);
992 /* Ask amdgpu_display_get_crtc_scanoutpos to return
993 * vpos as distance to start of vblank, instead of
994 * regular vertical scanout pos.
995 */
996 stat = amdgpu_display_get_crtc_scanoutpos(
997 dev, pipe, GET_DISTANCE_TO_VBLANKSTART,
998 &vpos, &hpos, NULL, NULL,
999 &adev->mode_info.crtcs[pipe]->base.hwmode);
1000 } while (count != amdgpu_display_vblank_get_counter(adev, pipe));
1001
1002 if (((stat & (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_ACCURATE)) !=
1003 (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_ACCURATE))) {
1004 DRM_DEBUG_VBL("Query failed! stat %d\n", stat);
1005 } else {
1006 DRM_DEBUG_VBL("crtc %d: dist from vblank start %d\n",
1007 pipe, vpos);
1008
1009 /* Bump counter if we are at >= leading edge of vblank,
1010 * but before vsync where vpos would turn negative and
1011 * the hw counter really increments.
1012 */
1013 if (vpos >= 0)
1014 count++;
1015 }
1016 } else {
1017 /* Fallback to use value as is. */
1018 count = amdgpu_display_vblank_get_counter(adev, pipe);
1019 DRM_DEBUG_VBL("NULL mode info! Returned count may be wrong.\n");
1020 }
1021
1022 return count;
1023}
1024
1025/**
1026 * amdgpu_enable_vblank_kms - enable vblank interrupt
1027 *
1028 * @dev: drm dev pointer
1029 * @pipe: crtc to enable vblank interrupt for
1030 *
1031 * Enable the interrupt on the requested crtc (all asics).
1032 * Returns 0 on success, -EINVAL on failure.
1033 */
1034int amdgpu_enable_vblank_kms(struct drm_device *dev, unsigned int pipe)
1035{
1036 struct amdgpu_device *adev = dev->dev_private;
1037 int idx = amdgpu_display_crtc_idx_to_irq_type(adev, pipe);
1038
1039 return amdgpu_irq_get(adev, &adev->crtc_irq, idx);
1040}
1041
1042/**
1043 * amdgpu_disable_vblank_kms - disable vblank interrupt
1044 *
1045 * @dev: drm dev pointer
1046 * @pipe: crtc to disable vblank interrupt for
1047 *
1048 * Disable the interrupt on the requested crtc (all asics).
1049 */
1050void amdgpu_disable_vblank_kms(struct drm_device *dev, unsigned int pipe)
1051{
1052 struct amdgpu_device *adev = dev->dev_private;
1053 int idx = amdgpu_display_crtc_idx_to_irq_type(adev, pipe);
1054
1055 amdgpu_irq_put(adev, &adev->crtc_irq, idx);
1056}
1057
1058const struct drm_ioctl_desc amdgpu_ioctls_kms[] = {
1059 DRM_IOCTL_DEF_DRV(AMDGPU_GEM_CREATE, amdgpu_gem_create_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
1060 DRM_IOCTL_DEF_DRV(AMDGPU_CTX, amdgpu_ctx_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
1061 DRM_IOCTL_DEF_DRV(AMDGPU_VM, amdgpu_vm_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
1062 DRM_IOCTL_DEF_DRV(AMDGPU_SCHED, amdgpu_sched_ioctl, DRM_MASTER),
1063 DRM_IOCTL_DEF_DRV(AMDGPU_BO_LIST, amdgpu_bo_list_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
1064 DRM_IOCTL_DEF_DRV(AMDGPU_FENCE_TO_HANDLE, amdgpu_cs_fence_to_handle_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
1065 /* KMS */
1066 DRM_IOCTL_DEF_DRV(AMDGPU_GEM_MMAP, amdgpu_gem_mmap_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
1067 DRM_IOCTL_DEF_DRV(AMDGPU_GEM_WAIT_IDLE, amdgpu_gem_wait_idle_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
1068 DRM_IOCTL_DEF_DRV(AMDGPU_CS, amdgpu_cs_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
1069 DRM_IOCTL_DEF_DRV(AMDGPU_INFO, amdgpu_info_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
1070 DRM_IOCTL_DEF_DRV(AMDGPU_WAIT_CS, amdgpu_cs_wait_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
1071 DRM_IOCTL_DEF_DRV(AMDGPU_WAIT_FENCES, amdgpu_cs_wait_fences_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
1072 DRM_IOCTL_DEF_DRV(AMDGPU_GEM_METADATA, amdgpu_gem_metadata_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
1073 DRM_IOCTL_DEF_DRV(AMDGPU_GEM_VA, amdgpu_gem_va_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
1074 DRM_IOCTL_DEF_DRV(AMDGPU_GEM_OP, amdgpu_gem_op_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
1075 DRM_IOCTL_DEF_DRV(AMDGPU_GEM_USERPTR, amdgpu_gem_userptr_ioctl, DRM_AUTH|DRM_RENDER_ALLOW)
1076};
1077const int amdgpu_max_kms_ioctl = ARRAY_SIZE(amdgpu_ioctls_kms);
1078
1079/*
1080 * Debugfs info
1081 */
1082#if defined(CONFIG_DEBUG_FS)
1083
1084static int amdgpu_debugfs_firmware_info(struct seq_file *m, void *data)
1085{
1086 struct drm_info_node *node = (struct drm_info_node *) m->private;
1087 struct drm_device *dev = node->minor->dev;
1088 struct amdgpu_device *adev = dev->dev_private;
1089 struct drm_amdgpu_info_firmware fw_info;
1090 struct drm_amdgpu_query_fw query_fw;
1091 int ret, i;
1092
1093 /* VCE */
1094 query_fw.fw_type = AMDGPU_INFO_FW_VCE;
1095 ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
1096 if (ret)
1097 return ret;
1098 seq_printf(m, "VCE feature version: %u, firmware version: 0x%08x\n",
1099 fw_info.feature, fw_info.ver);
1100
1101 /* UVD */
1102 query_fw.fw_type = AMDGPU_INFO_FW_UVD;
1103 ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
1104 if (ret)
1105 return ret;
1106 seq_printf(m, "UVD feature version: %u, firmware version: 0x%08x\n",
1107 fw_info.feature, fw_info.ver);
1108
1109 /* GMC */
1110 query_fw.fw_type = AMDGPU_INFO_FW_GMC;
1111 ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
1112 if (ret)
1113 return ret;
1114 seq_printf(m, "MC feature version: %u, firmware version: 0x%08x\n",
1115 fw_info.feature, fw_info.ver);
1116
1117 /* ME */
1118 query_fw.fw_type = AMDGPU_INFO_FW_GFX_ME;
1119 ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
1120 if (ret)
1121 return ret;
1122 seq_printf(m, "ME feature version: %u, firmware version: 0x%08x\n",
1123 fw_info.feature, fw_info.ver);
1124
1125 /* PFP */
1126 query_fw.fw_type = AMDGPU_INFO_FW_GFX_PFP;
1127 ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
1128 if (ret)
1129 return ret;
1130 seq_printf(m, "PFP feature version: %u, firmware version: 0x%08x\n",
1131 fw_info.feature, fw_info.ver);
1132
1133 /* CE */
1134 query_fw.fw_type = AMDGPU_INFO_FW_GFX_CE;
1135 ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
1136 if (ret)
1137 return ret;
1138 seq_printf(m, "CE feature version: %u, firmware version: 0x%08x\n",
1139 fw_info.feature, fw_info.ver);
1140
1141 /* RLC */
1142 query_fw.fw_type = AMDGPU_INFO_FW_GFX_RLC;
1143 ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
1144 if (ret)
1145 return ret;
1146 seq_printf(m, "RLC feature version: %u, firmware version: 0x%08x\n",
1147 fw_info.feature, fw_info.ver);
1148
1149 /* MEC */
1150 query_fw.fw_type = AMDGPU_INFO_FW_GFX_MEC;
1151 query_fw.index = 0;
1152 ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
1153 if (ret)
1154 return ret;
1155 seq_printf(m, "MEC feature version: %u, firmware version: 0x%08x\n",
1156 fw_info.feature, fw_info.ver);
1157
1158 /* MEC2 */
1159 if (adev->asic_type == CHIP_KAVERI ||
1160 (adev->asic_type > CHIP_TOPAZ && adev->asic_type != CHIP_STONEY)) {
1161 query_fw.index = 1;
1162 ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
1163 if (ret)
1164 return ret;
1165 seq_printf(m, "MEC2 feature version: %u, firmware version: 0x%08x\n",
1166 fw_info.feature, fw_info.ver);
1167 }
1168
1169 /* PSP SOS */
1170 query_fw.fw_type = AMDGPU_INFO_FW_SOS;
1171 ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
1172 if (ret)
1173 return ret;
1174 seq_printf(m, "SOS feature version: %u, firmware version: 0x%08x\n",
1175 fw_info.feature, fw_info.ver);
1176
1177
1178 /* PSP ASD */
1179 query_fw.fw_type = AMDGPU_INFO_FW_ASD;
1180 ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
1181 if (ret)
1182 return ret;
1183 seq_printf(m, "ASD feature version: %u, firmware version: 0x%08x\n",
1184 fw_info.feature, fw_info.ver);
1185
1186 /* SMC */
1187 query_fw.fw_type = AMDGPU_INFO_FW_SMC;
1188 ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
1189 if (ret)
1190 return ret;
1191 seq_printf(m, "SMC feature version: %u, firmware version: 0x%08x\n",
1192 fw_info.feature, fw_info.ver);
1193
1194 /* SDMA */
1195 query_fw.fw_type = AMDGPU_INFO_FW_SDMA;
1196 for (i = 0; i < adev->sdma.num_instances; i++) {
1197 query_fw.index = i;
1198 ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
1199 if (ret)
1200 return ret;
1201 seq_printf(m, "SDMA%d feature version: %u, firmware version: 0x%08x\n",
1202 i, fw_info.feature, fw_info.ver);
1203 }
1204
1205 /* VCN */
1206 query_fw.fw_type = AMDGPU_INFO_FW_VCN;
1207 ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
1208 if (ret)
1209 return ret;
1210 seq_printf(m, "VCN feature version: %u, firmware version: 0x%08x\n",
1211 fw_info.feature, fw_info.ver);
1212
1213 return 0;
1214}
1215
1216static const struct drm_info_list amdgpu_firmware_info_list[] = {
1217 {"amdgpu_firmware_info", amdgpu_debugfs_firmware_info, 0, NULL},
1218};
1219#endif
1220
1221int amdgpu_debugfs_firmware_init(struct amdgpu_device *adev)
1222{
1223#if defined(CONFIG_DEBUG_FS)
1224 return amdgpu_debugfs_add_files(adev, amdgpu_firmware_info_list,
1225 ARRAY_SIZE(amdgpu_firmware_info_list));
1226#else
1227 return 0;
1228#endif
1229}
1/*
2 * Copyright 2008 Advanced Micro Devices, Inc.
3 * Copyright 2008 Red Hat Inc.
4 * Copyright 2009 Jerome Glisse.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the "Software"),
8 * to deal in the Software without restriction, including without limitation
9 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10 * and/or sell copies of the Software, and to permit persons to whom the
11 * Software is furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice shall be included in
14 * all copies or substantial portions of the Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
20 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
21 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
22 * OTHER DEALINGS IN THE SOFTWARE.
23 *
24 * Authors: Dave Airlie
25 * Alex Deucher
26 * Jerome Glisse
27 */
28
29#include "amdgpu.h"
30#include <drm/drm_debugfs.h>
31#include <drm/amdgpu_drm.h>
32#include "amdgpu_sched.h"
33#include "amdgpu_uvd.h"
34#include "amdgpu_vce.h"
35#include "atom.h"
36
37#include <linux/vga_switcheroo.h>
38#include <linux/slab.h>
39#include <linux/uaccess.h>
40#include <linux/pci.h>
41#include <linux/pm_runtime.h>
42#include "amdgpu_amdkfd.h"
43#include "amdgpu_gem.h"
44#include "amdgpu_display.h"
45#include "amdgpu_ras.h"
46
47void amdgpu_unregister_gpu_instance(struct amdgpu_device *adev)
48{
49 struct amdgpu_gpu_instance *gpu_instance;
50 int i;
51
52 mutex_lock(&mgpu_info.mutex);
53
54 for (i = 0; i < mgpu_info.num_gpu; i++) {
55 gpu_instance = &(mgpu_info.gpu_ins[i]);
56 if (gpu_instance->adev == adev) {
57 mgpu_info.gpu_ins[i] =
58 mgpu_info.gpu_ins[mgpu_info.num_gpu - 1];
59 mgpu_info.num_gpu--;
60 if (adev->flags & AMD_IS_APU)
61 mgpu_info.num_apu--;
62 else
63 mgpu_info.num_dgpu--;
64 break;
65 }
66 }
67
68 mutex_unlock(&mgpu_info.mutex);
69}
70
71/**
72 * amdgpu_driver_unload_kms - Main unload function for KMS.
73 *
74 * @dev: drm dev pointer
75 *
76 * This is the main unload function for KMS (all asics).
77 * The device is torn down and dev->dev_private is freed.
78 */
79void amdgpu_driver_unload_kms(struct drm_device *dev)
80{
81 struct amdgpu_device *adev = dev->dev_private;
82
83 if (adev == NULL)
84 return;
85
86 amdgpu_unregister_gpu_instance(adev);
87
88 if (adev->rmmio == NULL)
89 goto done_free;
90
91 if (amdgpu_sriov_vf(adev))
92 amdgpu_virt_request_full_gpu(adev, false);
93
94 if (amdgpu_device_is_px(dev)) {
95 pm_runtime_get_sync(dev->dev);
96 pm_runtime_forbid(dev->dev);
97 }
98
99 amdgpu_acpi_fini(adev);
100
101 amdgpu_device_fini(adev);
102
103done_free:
104 kfree(adev);
105 dev->dev_private = NULL;
106}
107
108void amdgpu_register_gpu_instance(struct amdgpu_device *adev)
109{
110 struct amdgpu_gpu_instance *gpu_instance;
111
112 mutex_lock(&mgpu_info.mutex);
113
114 if (mgpu_info.num_gpu >= MAX_GPU_INSTANCE) {
115 DRM_ERROR("Cannot register more GPU instances\n");
116 mutex_unlock(&mgpu_info.mutex);
117 return;
118 }
119
120 gpu_instance = &(mgpu_info.gpu_ins[mgpu_info.num_gpu]);
121 gpu_instance->adev = adev;
122 gpu_instance->mgpu_fan_enabled = 0;
123
124 mgpu_info.num_gpu++;
125 if (adev->flags & AMD_IS_APU)
126 mgpu_info.num_apu++;
127 else
128 mgpu_info.num_dgpu++;
129
130 mutex_unlock(&mgpu_info.mutex);
131}
132
133/**
134 * amdgpu_driver_load_kms - Main load function for KMS.
135 *
136 * @dev: drm dev pointer
137 * @flags: device flags
138 *
139 * This is the main load function for KMS (all asics).
140 * Returns 0 on success, error on failure.
141 */
142int amdgpu_driver_load_kms(struct drm_device *dev, unsigned long flags)
143{
144 struct amdgpu_device *adev;
145 int r, acpi_status;
146
147 adev = kzalloc(sizeof(struct amdgpu_device), GFP_KERNEL);
148 if (adev == NULL) {
149 return -ENOMEM;
150 }
151 dev->dev_private = (void *)adev;
152
153 if ((amdgpu_runtime_pm != 0) &&
154 amdgpu_has_atpx() &&
155 (amdgpu_is_atpx_hybrid() ||
156 amdgpu_has_atpx_dgpu_power_cntl()) &&
157 ((flags & AMD_IS_APU) == 0) &&
158 !pci_is_thunderbolt_attached(dev->pdev))
159 flags |= AMD_IS_PX;
160
161 /* amdgpu_device_init() should report only fatal errors,
162 * such as memory allocation, iomapping or memory manager
163 * initialization failures. On success it must have properly
164 * initialized the GPU MC controller and be able to permit
165 * VRAM allocation.
166 */
167 r = amdgpu_device_init(adev, dev, dev->pdev, flags);
168 if (r) {
169 dev_err(&dev->pdev->dev, "Fatal error during GPU init\n");
170 goto out;
171 }
172
173 /* Call ACPI methods: they require modeset init,
174 * but failure here is not fatal.
175 */
176 if (!r) {
177 acpi_status = amdgpu_acpi_init(adev);
178 if (acpi_status)
179 dev_dbg(&dev->pdev->dev,
180 "Error during ACPI methods call\n");
181 }
182
183 if (amdgpu_device_is_px(dev)) {
184 dev_pm_set_driver_flags(dev->dev, DPM_FLAG_NEVER_SKIP);
185 pm_runtime_use_autosuspend(dev->dev);
186 pm_runtime_set_autosuspend_delay(dev->dev, 5000);
187 pm_runtime_set_active(dev->dev);
188 pm_runtime_allow(dev->dev);
189 pm_runtime_mark_last_busy(dev->dev);
190 pm_runtime_put_autosuspend(dev->dev);
191 }
192
193out:
194 if (r) {
195 /* balance pm_runtime_get_sync in amdgpu_driver_unload_kms */
196 if (adev->rmmio && amdgpu_device_is_px(dev))
197 pm_runtime_put_noidle(dev->dev);
198 amdgpu_driver_unload_kms(dev);
199 }
200
201 return r;
202}
203
204static int amdgpu_firmware_info(struct drm_amdgpu_info_firmware *fw_info,
205 struct drm_amdgpu_query_fw *query_fw,
206 struct amdgpu_device *adev)
207{
208 switch (query_fw->fw_type) {
209 case AMDGPU_INFO_FW_VCE:
210 fw_info->ver = adev->vce.fw_version;
211 fw_info->feature = adev->vce.fb_version;
212 break;
213 case AMDGPU_INFO_FW_UVD:
214 fw_info->ver = adev->uvd.fw_version;
215 fw_info->feature = 0;
216 break;
217 case AMDGPU_INFO_FW_VCN:
218 fw_info->ver = adev->vcn.fw_version;
219 fw_info->feature = 0;
220 break;
221 case AMDGPU_INFO_FW_GMC:
222 fw_info->ver = adev->gmc.fw_version;
223 fw_info->feature = 0;
224 break;
225 case AMDGPU_INFO_FW_GFX_ME:
226 fw_info->ver = adev->gfx.me_fw_version;
227 fw_info->feature = adev->gfx.me_feature_version;
228 break;
229 case AMDGPU_INFO_FW_GFX_PFP:
230 fw_info->ver = adev->gfx.pfp_fw_version;
231 fw_info->feature = adev->gfx.pfp_feature_version;
232 break;
233 case AMDGPU_INFO_FW_GFX_CE:
234 fw_info->ver = adev->gfx.ce_fw_version;
235 fw_info->feature = adev->gfx.ce_feature_version;
236 break;
237 case AMDGPU_INFO_FW_GFX_RLC:
238 fw_info->ver = adev->gfx.rlc_fw_version;
239 fw_info->feature = adev->gfx.rlc_feature_version;
240 break;
241 case AMDGPU_INFO_FW_GFX_RLC_RESTORE_LIST_CNTL:
242 fw_info->ver = adev->gfx.rlc_srlc_fw_version;
243 fw_info->feature = adev->gfx.rlc_srlc_feature_version;
244 break;
245 case AMDGPU_INFO_FW_GFX_RLC_RESTORE_LIST_GPM_MEM:
246 fw_info->ver = adev->gfx.rlc_srlg_fw_version;
247 fw_info->feature = adev->gfx.rlc_srlg_feature_version;
248 break;
249 case AMDGPU_INFO_FW_GFX_RLC_RESTORE_LIST_SRM_MEM:
250 fw_info->ver = adev->gfx.rlc_srls_fw_version;
251 fw_info->feature = adev->gfx.rlc_srls_feature_version;
252 break;
253 case AMDGPU_INFO_FW_GFX_MEC:
254 if (query_fw->index == 0) {
255 fw_info->ver = adev->gfx.mec_fw_version;
256 fw_info->feature = adev->gfx.mec_feature_version;
257 } else if (query_fw->index == 1) {
258 fw_info->ver = adev->gfx.mec2_fw_version;
259 fw_info->feature = adev->gfx.mec2_feature_version;
260 } else
261 return -EINVAL;
262 break;
263 case AMDGPU_INFO_FW_SMC:
264 fw_info->ver = adev->pm.fw_version;
265 fw_info->feature = 0;
266 break;
267 case AMDGPU_INFO_FW_TA:
268 if (query_fw->index > 1)
269 return -EINVAL;
270 if (query_fw->index == 0) {
271 fw_info->ver = adev->psp.ta_fw_version;
272 fw_info->feature = adev->psp.ta_xgmi_ucode_version;
273 } else {
274 fw_info->ver = adev->psp.ta_fw_version;
275 fw_info->feature = adev->psp.ta_ras_ucode_version;
276 }
277 break;
278 case AMDGPU_INFO_FW_SDMA:
279 if (query_fw->index >= adev->sdma.num_instances)
280 return -EINVAL;
281 fw_info->ver = adev->sdma.instance[query_fw->index].fw_version;
282 fw_info->feature = adev->sdma.instance[query_fw->index].feature_version;
283 break;
284 case AMDGPU_INFO_FW_SOS:
285 fw_info->ver = adev->psp.sos_fw_version;
286 fw_info->feature = adev->psp.sos_feature_version;
287 break;
288 case AMDGPU_INFO_FW_ASD:
289 fw_info->ver = adev->psp.asd_fw_version;
290 fw_info->feature = adev->psp.asd_feature_version;
291 break;
292 case AMDGPU_INFO_FW_DMCU:
293 fw_info->ver = adev->dm.dmcu_fw_version;
294 fw_info->feature = 0;
295 break;
296 default:
297 return -EINVAL;
298 }
299 return 0;
300}
301
302static int amdgpu_hw_ip_info(struct amdgpu_device *adev,
303 struct drm_amdgpu_info *info,
304 struct drm_amdgpu_info_hw_ip *result)
305{
306 uint32_t ib_start_alignment = 0;
307 uint32_t ib_size_alignment = 0;
308 enum amd_ip_block_type type;
309 unsigned int num_rings = 0;
310 unsigned int i, j;
311
312 if (info->query_hw_ip.ip_instance >= AMDGPU_HW_IP_INSTANCE_MAX_COUNT)
313 return -EINVAL;
314
315 switch (info->query_hw_ip.type) {
316 case AMDGPU_HW_IP_GFX:
317 type = AMD_IP_BLOCK_TYPE_GFX;
318 for (i = 0; i < adev->gfx.num_gfx_rings; i++)
319 if (adev->gfx.gfx_ring[i].sched.ready)
320 ++num_rings;
321 ib_start_alignment = 32;
322 ib_size_alignment = 32;
323 break;
324 case AMDGPU_HW_IP_COMPUTE:
325 type = AMD_IP_BLOCK_TYPE_GFX;
326 for (i = 0; i < adev->gfx.num_compute_rings; i++)
327 if (adev->gfx.compute_ring[i].sched.ready)
328 ++num_rings;
329 ib_start_alignment = 32;
330 ib_size_alignment = 32;
331 break;
332 case AMDGPU_HW_IP_DMA:
333 type = AMD_IP_BLOCK_TYPE_SDMA;
334 for (i = 0; i < adev->sdma.num_instances; i++)
335 if (adev->sdma.instance[i].ring.sched.ready)
336 ++num_rings;
337 ib_start_alignment = 256;
338 ib_size_alignment = 4;
339 break;
340 case AMDGPU_HW_IP_UVD:
341 type = AMD_IP_BLOCK_TYPE_UVD;
342 for (i = 0; i < adev->uvd.num_uvd_inst; i++) {
343 if (adev->uvd.harvest_config & (1 << i))
344 continue;
345
346 if (adev->uvd.inst[i].ring.sched.ready)
347 ++num_rings;
348 }
349 ib_start_alignment = 64;
350 ib_size_alignment = 64;
351 break;
352 case AMDGPU_HW_IP_VCE:
353 type = AMD_IP_BLOCK_TYPE_VCE;
354 for (i = 0; i < adev->vce.num_rings; i++)
355 if (adev->vce.ring[i].sched.ready)
356 ++num_rings;
357 ib_start_alignment = 4;
358 ib_size_alignment = 1;
359 break;
360 case AMDGPU_HW_IP_UVD_ENC:
361 type = AMD_IP_BLOCK_TYPE_UVD;
362 for (i = 0; i < adev->uvd.num_uvd_inst; i++) {
363 if (adev->uvd.harvest_config & (1 << i))
364 continue;
365
366 for (j = 0; j < adev->uvd.num_enc_rings; j++)
367 if (adev->uvd.inst[i].ring_enc[j].sched.ready)
368 ++num_rings;
369 }
370 ib_start_alignment = 64;
371 ib_size_alignment = 64;
372 break;
373 case AMDGPU_HW_IP_VCN_DEC:
374 type = AMD_IP_BLOCK_TYPE_VCN;
375 for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
376 if (adev->uvd.harvest_config & (1 << i))
377 continue;
378
379 if (adev->vcn.inst[i].ring_dec.sched.ready)
380 ++num_rings;
381 }
382 ib_start_alignment = 16;
383 ib_size_alignment = 16;
384 break;
385 case AMDGPU_HW_IP_VCN_ENC:
386 type = AMD_IP_BLOCK_TYPE_VCN;
387 for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
388 if (adev->uvd.harvest_config & (1 << i))
389 continue;
390
391 for (j = 0; j < adev->vcn.num_enc_rings; j++)
392 if (adev->vcn.inst[i].ring_enc[j].sched.ready)
393 ++num_rings;
394 }
395 ib_start_alignment = 64;
396 ib_size_alignment = 1;
397 break;
398 case AMDGPU_HW_IP_VCN_JPEG:
399 type = AMD_IP_BLOCK_TYPE_VCN;
400 for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
401 if (adev->uvd.harvest_config & (1 << i))
402 continue;
403
404 if (adev->vcn.inst[i].ring_jpeg.sched.ready)
405 ++num_rings;
406 }
407 ib_start_alignment = 16;
408 ib_size_alignment = 16;
409 break;
410 default:
411 return -EINVAL;
412 }
413
414 for (i = 0; i < adev->num_ip_blocks; i++)
415 if (adev->ip_blocks[i].version->type == type &&
416 adev->ip_blocks[i].status.valid)
417 break;
418
419 if (i == adev->num_ip_blocks)
420 return 0;
421
422 num_rings = min(amdgpu_ctx_num_entities[info->query_hw_ip.type],
423 num_rings);
424
425 result->hw_ip_version_major = adev->ip_blocks[i].version->major;
426 result->hw_ip_version_minor = adev->ip_blocks[i].version->minor;
427 result->capabilities_flags = 0;
428 result->available_rings = (1 << num_rings) - 1;
429 result->ib_start_alignment = ib_start_alignment;
430 result->ib_size_alignment = ib_size_alignment;
431 return 0;
432}
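/*
 * Note that available_rings is reported as a contiguous bitmask rather
 * than a per-ring ready map: with, for example, three ready compute
 * rings (and assuming the per-type cap in amdgpu_ctx_num_entities[] is
 * not smaller), num_rings = 3 and the mask becomes (1 << 3) - 1 = 0x7.
 */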
433
434/*
435 * Userspace get information ioctl
436 */
437/**
438 * amdgpu_info_ioctl - answer a device specific request.
439 *
440 * @dev: drm device pointer
441 * @data: request object
442 * @filp: drm filp
443 *
444 * This function is used to pass device-specific parameters to userspace
445 * drivers. Examples include: PCI device id, pipeline params, tiling params,
446 * etc. (all asics).
447 * Returns 0 on success, -EINVAL on failure.
448 */
449static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
450{
451 struct amdgpu_device *adev = dev->dev_private;
452 struct drm_amdgpu_info *info = data;
453 struct amdgpu_mode_info *minfo = &adev->mode_info;
454 void __user *out = (void __user *)(uintptr_t)info->return_pointer;
455 uint32_t size = info->return_size;
456 struct drm_crtc *crtc;
457 uint32_t ui32 = 0;
458 uint64_t ui64 = 0;
459 int i, found;
460 int ui32_size = sizeof(ui32);
461
462 if (!info->return_size || !info->return_pointer)
463 return -EINVAL;
464
465 switch (info->query) {
466 case AMDGPU_INFO_ACCEL_WORKING:
467 ui32 = adev->accel_working;
468 return copy_to_user(out, &ui32, min(size, 4u)) ? -EFAULT : 0;
469 case AMDGPU_INFO_CRTC_FROM_ID:
470 for (i = 0, found = 0; i < adev->mode_info.num_crtc; i++) {
471 crtc = (struct drm_crtc *)minfo->crtcs[i];
472 if (crtc && crtc->base.id == info->mode_crtc.id) {
473 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
474 ui32 = amdgpu_crtc->crtc_id;
475 found = 1;
476 break;
477 }
478 }
479 if (!found) {
480 DRM_DEBUG_KMS("unknown crtc id %d\n", info->mode_crtc.id);
481 return -EINVAL;
482 }
483 return copy_to_user(out, &ui32, min(size, 4u)) ? -EFAULT : 0;
484 case AMDGPU_INFO_HW_IP_INFO: {
485 struct drm_amdgpu_info_hw_ip ip = {};
486 int ret;
487
488 ret = amdgpu_hw_ip_info(adev, info, &ip);
489 if (ret)
490 return ret;
491
492 ret = copy_to_user(out, &ip, min((size_t)size, sizeof(ip)));
493 return ret ? -EFAULT : 0;
494 }
495 case AMDGPU_INFO_HW_IP_COUNT: {
496 enum amd_ip_block_type type;
497 uint32_t count = 0;
498
499 switch (info->query_hw_ip.type) {
500 case AMDGPU_HW_IP_GFX:
501 type = AMD_IP_BLOCK_TYPE_GFX;
502 break;
503 case AMDGPU_HW_IP_COMPUTE:
504 type = AMD_IP_BLOCK_TYPE_GFX;
505 break;
506 case AMDGPU_HW_IP_DMA:
507 type = AMD_IP_BLOCK_TYPE_SDMA;
508 break;
509 case AMDGPU_HW_IP_UVD:
510 type = AMD_IP_BLOCK_TYPE_UVD;
511 break;
512 case AMDGPU_HW_IP_VCE:
513 type = AMD_IP_BLOCK_TYPE_VCE;
514 break;
515 case AMDGPU_HW_IP_UVD_ENC:
516 type = AMD_IP_BLOCK_TYPE_UVD;
517 break;
518 case AMDGPU_HW_IP_VCN_DEC:
519 case AMDGPU_HW_IP_VCN_ENC:
520 case AMDGPU_HW_IP_VCN_JPEG:
521 type = AMD_IP_BLOCK_TYPE_VCN;
522 break;
523 default:
524 return -EINVAL;
525 }
526
527 for (i = 0; i < adev->num_ip_blocks; i++)
528 if (adev->ip_blocks[i].version->type == type &&
529 adev->ip_blocks[i].status.valid &&
530 count < AMDGPU_HW_IP_INSTANCE_MAX_COUNT)
531 count++;
532
533 return copy_to_user(out, &count, min(size, 4u)) ? -EFAULT : 0;
534 }
535 case AMDGPU_INFO_TIMESTAMP:
536 ui64 = amdgpu_gfx_get_gpu_clock_counter(adev);
537 return copy_to_user(out, &ui64, min(size, 8u)) ? -EFAULT : 0;
538 case AMDGPU_INFO_FW_VERSION: {
539 struct drm_amdgpu_info_firmware fw_info;
540 int ret;
541
542 /* We only support one instance of each IP block right now. */
543 if (info->query_fw.ip_instance != 0)
544 return -EINVAL;
545
546 ret = amdgpu_firmware_info(&fw_info, &info->query_fw, adev);
547 if (ret)
548 return ret;
549
550 return copy_to_user(out, &fw_info,
551 min((size_t)size, sizeof(fw_info))) ? -EFAULT : 0;
552 }
553 case AMDGPU_INFO_NUM_BYTES_MOVED:
554 ui64 = atomic64_read(&adev->num_bytes_moved);
555 return copy_to_user(out, &ui64, min(size, 8u)) ? -EFAULT : 0;
556 case AMDGPU_INFO_NUM_EVICTIONS:
557 ui64 = atomic64_read(&adev->num_evictions);
558 return copy_to_user(out, &ui64, min(size, 8u)) ? -EFAULT : 0;
559 case AMDGPU_INFO_NUM_VRAM_CPU_PAGE_FAULTS:
560 ui64 = atomic64_read(&adev->num_vram_cpu_page_faults);
561 return copy_to_user(out, &ui64, min(size, 8u)) ? -EFAULT : 0;
562 case AMDGPU_INFO_VRAM_USAGE:
563 ui64 = amdgpu_vram_mgr_usage(&adev->mman.bdev.man[TTM_PL_VRAM]);
564 return copy_to_user(out, &ui64, min(size, 8u)) ? -EFAULT : 0;
565 case AMDGPU_INFO_VIS_VRAM_USAGE:
566 ui64 = amdgpu_vram_mgr_vis_usage(&adev->mman.bdev.man[TTM_PL_VRAM]);
567 return copy_to_user(out, &ui64, min(size, 8u)) ? -EFAULT : 0;
568 case AMDGPU_INFO_GTT_USAGE:
569 ui64 = amdgpu_gtt_mgr_usage(&adev->mman.bdev.man[TTM_PL_TT]);
570 return copy_to_user(out, &ui64, min(size, 8u)) ? -EFAULT : 0;
571 case AMDGPU_INFO_GDS_CONFIG: {
572 struct drm_amdgpu_info_gds gds_info;
573
574 memset(&gds_info, 0, sizeof(gds_info));
575 gds_info.compute_partition_size = adev->gds.gds_size;
576 gds_info.gds_total_size = adev->gds.gds_size;
577 gds_info.gws_per_compute_partition = adev->gds.gws_size;
578 gds_info.oa_per_compute_partition = adev->gds.oa_size;
579 return copy_to_user(out, &gds_info,
580 min((size_t)size, sizeof(gds_info))) ? -EFAULT : 0;
581 }
582 case AMDGPU_INFO_VRAM_GTT: {
583 struct drm_amdgpu_info_vram_gtt vram_gtt;
584
585 vram_gtt.vram_size = adev->gmc.real_vram_size -
586 atomic64_read(&adev->vram_pin_size);
587 vram_gtt.vram_cpu_accessible_size = adev->gmc.visible_vram_size -
588 atomic64_read(&adev->visible_pin_size);
589 vram_gtt.gtt_size = adev->mman.bdev.man[TTM_PL_TT].size;
590 vram_gtt.gtt_size *= PAGE_SIZE;
591 vram_gtt.gtt_size -= atomic64_read(&adev->gart_pin_size);
592 return copy_to_user(out, &vram_gtt,
593 min((size_t)size, sizeof(vram_gtt))) ? -EFAULT : 0;
594 }
595 case AMDGPU_INFO_MEMORY: {
596 struct drm_amdgpu_memory_info mem;
597
598 memset(&mem, 0, sizeof(mem));
599 mem.vram.total_heap_size = adev->gmc.real_vram_size;
600 mem.vram.usable_heap_size = adev->gmc.real_vram_size -
601 atomic64_read(&adev->vram_pin_size);
602 mem.vram.heap_usage =
603 amdgpu_vram_mgr_usage(&adev->mman.bdev.man[TTM_PL_VRAM]);
604 mem.vram.max_allocation = mem.vram.usable_heap_size * 3 / 4;
605
606 mem.cpu_accessible_vram.total_heap_size =
607 adev->gmc.visible_vram_size;
608 mem.cpu_accessible_vram.usable_heap_size = adev->gmc.visible_vram_size -
609 atomic64_read(&adev->visible_pin_size);
610 mem.cpu_accessible_vram.heap_usage =
611 amdgpu_vram_mgr_vis_usage(&adev->mman.bdev.man[TTM_PL_VRAM]);
612 mem.cpu_accessible_vram.max_allocation =
613 mem.cpu_accessible_vram.usable_heap_size * 3 / 4;
614
615 mem.gtt.total_heap_size = adev->mman.bdev.man[TTM_PL_TT].size;
616 mem.gtt.total_heap_size *= PAGE_SIZE;
617 mem.gtt.usable_heap_size = mem.gtt.total_heap_size -
618 atomic64_read(&adev->gart_pin_size);
619 mem.gtt.heap_usage =
620 amdgpu_gtt_mgr_usage(&adev->mman.bdev.man[TTM_PL_TT]);
621 mem.gtt.max_allocation = mem.gtt.usable_heap_size * 3 / 4;
622
623 return copy_to_user(out, &mem,
624 min((size_t)size, sizeof(mem)))
625 ? -EFAULT : 0;
626 }
627 case AMDGPU_INFO_READ_MMR_REG: {
628 unsigned n, alloc_size;
629 uint32_t *regs;
630 unsigned se_num = (info->read_mmr_reg.instance >>
631 AMDGPU_INFO_MMR_SE_INDEX_SHIFT) &
632 AMDGPU_INFO_MMR_SE_INDEX_MASK;
633 unsigned sh_num = (info->read_mmr_reg.instance >>
634 AMDGPU_INFO_MMR_SH_INDEX_SHIFT) &
635 AMDGPU_INFO_MMR_SH_INDEX_MASK;
636
637		/* set full masks if userspace set all bits
638 * in the bitfields */
639 if (se_num == AMDGPU_INFO_MMR_SE_INDEX_MASK)
640 se_num = 0xffffffff;
641 if (sh_num == AMDGPU_INFO_MMR_SH_INDEX_MASK)
642 sh_num = 0xffffffff;
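		/*
		 * Illustrative encoding, assuming userspace builds the instance
		 * field from the same shift/mask pairs decoded above: selecting
		 * SE 1, SH 0 would be
		 *	instance = (1 << AMDGPU_INFO_MMR_SE_INDEX_SHIFT) |
		 *		   (0 << AMDGPU_INFO_MMR_SH_INDEX_SHIFT);
		 */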
643
644 if (info->read_mmr_reg.count > 128)
645 return -EINVAL;
646
647 regs = kmalloc_array(info->read_mmr_reg.count, sizeof(*regs), GFP_KERNEL);
648 if (!regs)
649 return -ENOMEM;
650 alloc_size = info->read_mmr_reg.count * sizeof(*regs);
651
652 amdgpu_gfx_off_ctrl(adev, false);
653 for (i = 0; i < info->read_mmr_reg.count; i++) {
654 if (amdgpu_asic_read_register(adev, se_num, sh_num,
655 info->read_mmr_reg.dword_offset + i,
656						      &regs[i])) {
657 DRM_DEBUG_KMS("unallowed offset %#x\n",
658 info->read_mmr_reg.dword_offset + i);
659 kfree(regs);
660 amdgpu_gfx_off_ctrl(adev, true);
661 return -EFAULT;
662 }
663 }
664 amdgpu_gfx_off_ctrl(adev, true);
665 n = copy_to_user(out, regs, min(size, alloc_size));
666 kfree(regs);
667 return n ? -EFAULT : 0;
668 }
669 case AMDGPU_INFO_DEV_INFO: {
670 struct drm_amdgpu_info_device dev_info = {};
671 uint64_t vm_size;
672
673 dev_info.device_id = dev->pdev->device;
674 dev_info.chip_rev = adev->rev_id;
675 dev_info.external_rev = adev->external_rev_id;
676 dev_info.pci_rev = dev->pdev->revision;
677 dev_info.family = adev->family;
678 dev_info.num_shader_engines = adev->gfx.config.max_shader_engines;
679 dev_info.num_shader_arrays_per_engine = adev->gfx.config.max_sh_per_se;
680		/* return all clocks in KHz (internal clocks are in 10 kHz units, hence the factor of 10) */
681 dev_info.gpu_counter_freq = amdgpu_asic_get_xclk(adev) * 10;
682 if (adev->pm.dpm_enabled) {
683 dev_info.max_engine_clock = amdgpu_dpm_get_sclk(adev, false) * 10;
684 dev_info.max_memory_clock = amdgpu_dpm_get_mclk(adev, false) * 10;
685 } else if (amdgpu_sriov_vf(adev) && amdgim_is_hwperf(adev) &&
686 adev->virt.ops->get_pp_clk) {
687 dev_info.max_engine_clock = amdgpu_virt_get_sclk(adev, false) * 10;
688 dev_info.max_memory_clock = amdgpu_virt_get_mclk(adev, false) * 10;
689 } else {
690 dev_info.max_engine_clock = adev->clock.default_sclk * 10;
691 dev_info.max_memory_clock = adev->clock.default_mclk * 10;
692 }
693 dev_info.enabled_rb_pipes_mask = adev->gfx.config.backend_enable_mask;
694 dev_info.num_rb_pipes = adev->gfx.config.max_backends_per_se *
695 adev->gfx.config.max_shader_engines;
696 dev_info.num_hw_gfx_contexts = adev->gfx.config.max_hw_contexts;
697 dev_info._pad = 0;
698 dev_info.ids_flags = 0;
699 if (adev->flags & AMD_IS_APU)
700 dev_info.ids_flags |= AMDGPU_IDS_FLAGS_FUSION;
701 if (amdgpu_mcbp || amdgpu_sriov_vf(adev))
702 dev_info.ids_flags |= AMDGPU_IDS_FLAGS_PREEMPTION;
703
704 vm_size = adev->vm_manager.max_pfn * AMDGPU_GPU_PAGE_SIZE;
705 vm_size -= AMDGPU_VA_RESERVED_SIZE;
706
707		/* Older VCE FW versions are buggy and can handle only 40 bits */
708 if (adev->vce.fw_version &&
709 adev->vce.fw_version < AMDGPU_VCE_FW_53_45)
710 vm_size = min(vm_size, 1ULL << 40);
711
712 dev_info.virtual_address_offset = AMDGPU_VA_RESERVED_SIZE;
713 dev_info.virtual_address_max =
714 min(vm_size, AMDGPU_GMC_HOLE_START);
715
716 if (vm_size > AMDGPU_GMC_HOLE_START) {
717 dev_info.high_va_offset = AMDGPU_GMC_HOLE_END;
718 dev_info.high_va_max = AMDGPU_GMC_HOLE_END | vm_size;
719 }
720 dev_info.virtual_address_alignment = max((int)PAGE_SIZE, AMDGPU_GPU_PAGE_SIZE);
721 dev_info.pte_fragment_size = (1 << adev->vm_manager.fragment_size) * AMDGPU_GPU_PAGE_SIZE;
722 dev_info.gart_page_size = AMDGPU_GPU_PAGE_SIZE;
723 dev_info.cu_active_number = adev->gfx.cu_info.number;
724 dev_info.cu_ao_mask = adev->gfx.cu_info.ao_cu_mask;
725 dev_info.ce_ram_size = adev->gfx.ce_ram_size;
726 memcpy(&dev_info.cu_ao_bitmap[0], &adev->gfx.cu_info.ao_cu_bitmap[0],
727 sizeof(adev->gfx.cu_info.ao_cu_bitmap));
728 memcpy(&dev_info.cu_bitmap[0], &adev->gfx.cu_info.bitmap[0],
729 sizeof(adev->gfx.cu_info.bitmap));
730 dev_info.vram_type = adev->gmc.vram_type;
731 dev_info.vram_bit_width = adev->gmc.vram_width;
732 dev_info.vce_harvest_config = adev->vce.harvest_config;
733 dev_info.gc_double_offchip_lds_buf =
734 adev->gfx.config.double_offchip_lds_buf;
735
736 if (amdgpu_ngg) {
737 dev_info.prim_buf_gpu_addr = adev->gfx.ngg.buf[NGG_PRIM].gpu_addr;
738 dev_info.prim_buf_size = adev->gfx.ngg.buf[NGG_PRIM].size;
739 dev_info.pos_buf_gpu_addr = adev->gfx.ngg.buf[NGG_POS].gpu_addr;
740 dev_info.pos_buf_size = adev->gfx.ngg.buf[NGG_POS].size;
741 dev_info.cntl_sb_buf_gpu_addr = adev->gfx.ngg.buf[NGG_CNTL].gpu_addr;
742 dev_info.cntl_sb_buf_size = adev->gfx.ngg.buf[NGG_CNTL].size;
743 dev_info.param_buf_gpu_addr = adev->gfx.ngg.buf[NGG_PARAM].gpu_addr;
744 dev_info.param_buf_size = adev->gfx.ngg.buf[NGG_PARAM].size;
745 }
746 dev_info.wave_front_size = adev->gfx.cu_info.wave_front_size;
747 dev_info.num_shader_visible_vgprs = adev->gfx.config.max_gprs;
748 dev_info.num_cu_per_sh = adev->gfx.config.max_cu_per_sh;
749 dev_info.num_tcc_blocks = adev->gfx.config.max_texture_channel_caches;
750 dev_info.gs_vgt_table_depth = adev->gfx.config.gs_vgt_table_depth;
751 dev_info.gs_prim_buffer_depth = adev->gfx.config.gs_prim_buffer_depth;
752 dev_info.max_gs_waves_per_vgt = adev->gfx.config.max_gs_threads;
753
754 if (adev->family >= AMDGPU_FAMILY_NV)
755 dev_info.pa_sc_tile_steering_override =
756 adev->gfx.config.pa_sc_tile_steering_override;
757
758 dev_info.tcc_disabled_mask = adev->gfx.config.tcc_disabled_mask;
759
760 return copy_to_user(out, &dev_info,
761 min((size_t)size, sizeof(dev_info))) ? -EFAULT : 0;
762 }
763 case AMDGPU_INFO_VCE_CLOCK_TABLE: {
764 unsigned i;
765 struct drm_amdgpu_info_vce_clock_table vce_clk_table = {};
766 struct amd_vce_state *vce_state;
767
768 for (i = 0; i < AMDGPU_VCE_CLOCK_TABLE_ENTRIES; i++) {
769 vce_state = amdgpu_dpm_get_vce_clock_state(adev, i);
770 if (vce_state) {
771 vce_clk_table.entries[i].sclk = vce_state->sclk;
772 vce_clk_table.entries[i].mclk = vce_state->mclk;
773 vce_clk_table.entries[i].eclk = vce_state->evclk;
774 vce_clk_table.num_valid_entries++;
775 }
776 }
777
778 return copy_to_user(out, &vce_clk_table,
779 min((size_t)size, sizeof(vce_clk_table))) ? -EFAULT : 0;
780 }
781 case AMDGPU_INFO_VBIOS: {
782 uint32_t bios_size = adev->bios_size;
783
784 switch (info->vbios_info.type) {
785 case AMDGPU_INFO_VBIOS_SIZE:
786 return copy_to_user(out, &bios_size,
787 min((size_t)size, sizeof(bios_size)))
788 ? -EFAULT : 0;
789 case AMDGPU_INFO_VBIOS_IMAGE: {
790 uint8_t *bios;
791 uint32_t bios_offset = info->vbios_info.offset;
792
793 if (bios_offset >= bios_size)
794 return -EINVAL;
795
796 bios = adev->bios + bios_offset;
797 return copy_to_user(out, bios,
798 min((size_t)size, (size_t)(bios_size - bios_offset)))
799 ? -EFAULT : 0;
800 }
801 default:
802 DRM_DEBUG_KMS("Invalid request %d\n",
803 info->vbios_info.type);
804 return -EINVAL;
805 }
806 }
807 case AMDGPU_INFO_NUM_HANDLES: {
808 struct drm_amdgpu_info_num_handles handle;
809
810 switch (info->query_hw_ip.type) {
811 case AMDGPU_HW_IP_UVD:
812			/* Starting with Polaris, we support unlimited UVD handles */
813 if (adev->asic_type < CHIP_POLARIS10) {
814 handle.uvd_max_handles = adev->uvd.max_handles;
815 handle.uvd_used_handles = amdgpu_uvd_used_handles(adev);
816
817 return copy_to_user(out, &handle,
818 min((size_t)size, sizeof(handle))) ? -EFAULT : 0;
819 } else {
820 return -ENODATA;
821 }
822
823 break;
824 default:
825 return -EINVAL;
826 }
827 }
828 case AMDGPU_INFO_SENSOR: {
829 if (!adev->pm.dpm_enabled)
830 return -ENOENT;
831
832 switch (info->sensor_info.type) {
833 case AMDGPU_INFO_SENSOR_GFX_SCLK:
834			/* get sclk in MHz */
835 if (amdgpu_dpm_read_sensor(adev,
836 AMDGPU_PP_SENSOR_GFX_SCLK,
837 (void *)&ui32, &ui32_size)) {
838 return -EINVAL;
839 }
840 ui32 /= 100;
841 break;
842 case AMDGPU_INFO_SENSOR_GFX_MCLK:
843			/* get mclk in MHz */
844 if (amdgpu_dpm_read_sensor(adev,
845 AMDGPU_PP_SENSOR_GFX_MCLK,
846 (void *)&ui32, &ui32_size)) {
847 return -EINVAL;
848 }
849 ui32 /= 100;
850 break;
851 case AMDGPU_INFO_SENSOR_GPU_TEMP:
852 /* get temperature in millidegrees C */
853 if (amdgpu_dpm_read_sensor(adev,
854 AMDGPU_PP_SENSOR_GPU_TEMP,
855 (void *)&ui32, &ui32_size)) {
856 return -EINVAL;
857 }
858 break;
859 case AMDGPU_INFO_SENSOR_GPU_LOAD:
860 /* get GPU load */
861 if (amdgpu_dpm_read_sensor(adev,
862 AMDGPU_PP_SENSOR_GPU_LOAD,
863 (void *)&ui32, &ui32_size)) {
864 return -EINVAL;
865 }
866 break;
867 case AMDGPU_INFO_SENSOR_GPU_AVG_POWER:
868			/* get average GPU power in watts (sensor reports 8.8 fixed point) */
869 if (amdgpu_dpm_read_sensor(adev,
870 AMDGPU_PP_SENSOR_GPU_POWER,
871 (void *)&ui32, &ui32_size)) {
872 return -EINVAL;
873 }
874 ui32 >>= 8;
875 break;
876 case AMDGPU_INFO_SENSOR_VDDNB:
877 /* get VDDNB in millivolts */
878 if (amdgpu_dpm_read_sensor(adev,
879 AMDGPU_PP_SENSOR_VDDNB,
880 (void *)&ui32, &ui32_size)) {
881 return -EINVAL;
882 }
883 break;
884 case AMDGPU_INFO_SENSOR_VDDGFX:
885 /* get VDDGFX in millivolts */
886 if (amdgpu_dpm_read_sensor(adev,
887 AMDGPU_PP_SENSOR_VDDGFX,
888 (void *)&ui32, &ui32_size)) {
889 return -EINVAL;
890 }
891 break;
892 case AMDGPU_INFO_SENSOR_STABLE_PSTATE_GFX_SCLK:
893			/* get stable pstate sclk in MHz */
894 if (amdgpu_dpm_read_sensor(adev,
895 AMDGPU_PP_SENSOR_STABLE_PSTATE_SCLK,
896 (void *)&ui32, &ui32_size)) {
897 return -EINVAL;
898 }
899 ui32 /= 100;
900 break;
901 case AMDGPU_INFO_SENSOR_STABLE_PSTATE_GFX_MCLK:
902			/* get stable pstate mclk in MHz */
903 if (amdgpu_dpm_read_sensor(adev,
904 AMDGPU_PP_SENSOR_STABLE_PSTATE_MCLK,
905 (void *)&ui32, &ui32_size)) {
906 return -EINVAL;
907 }
908 ui32 /= 100;
909 break;
910 default:
911 DRM_DEBUG_KMS("Invalid request %d\n",
912 info->sensor_info.type);
913 return -EINVAL;
914 }
915 return copy_to_user(out, &ui32, min(size, 4u)) ? -EFAULT : 0;
916 }
917 case AMDGPU_INFO_VRAM_LOST_COUNTER:
918 ui32 = atomic_read(&adev->vram_lost_counter);
919 return copy_to_user(out, &ui32, min(size, 4u)) ? -EFAULT : 0;
920 case AMDGPU_INFO_RAS_ENABLED_FEATURES: {
921 struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
922 uint64_t ras_mask;
923
924 if (!ras)
925 return -EINVAL;
926 ras_mask = (uint64_t)ras->supported << 32 | ras->features;
927
928 return copy_to_user(out, &ras_mask,
929 min_t(u64, size, sizeof(ras_mask))) ?
930 -EFAULT : 0;
931 }
932 default:
933 DRM_DEBUG_KMS("Invalid request %d\n", info->query);
934 return -EINVAL;
935 }
936 return 0;
937}
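/*
 * A minimal userspace sketch of driving this query path through libdrm
 * (illustrative only; error handling is omitted, "fd" is assumed to be an
 * already open amdgpu render node, and xf86drm.h / amdgpu_drm.h provide the
 * declarations):
 *
 *	struct drm_amdgpu_info request = {};
 *	uint64_t vram_usage = 0;
 *
 *	request.return_pointer = (uintptr_t)&vram_usage;
 *	request.return_size = sizeof(vram_usage);
 *	request.query = AMDGPU_INFO_VRAM_USAGE;
 *
 *	if (drmCommandWrite(fd, DRM_AMDGPU_INFO, &request, sizeof(request)) == 0)
 *		printf("VRAM in use: %llu bytes\n", (unsigned long long)vram_usage);
 */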
938
939
940/*
941 * Outdated mess for old drm with Xorg being in charge (void function now).
942 */
943/**
944 * amdgpu_driver_lastclose_kms - drm callback for last close
945 *
946 * @dev: drm dev pointer
947 *
948 * Switch vga_switcheroo state after last close (all asics).
949 */
950void amdgpu_driver_lastclose_kms(struct drm_device *dev)
951{
952 drm_fb_helper_lastclose(dev);
953 vga_switcheroo_process_delayed_switch();
954}
955
956/**
957 * amdgpu_driver_open_kms - drm callback for open
958 *
959 * @dev: drm dev pointer
960 * @file_priv: drm file
961 *
962 * On device open, init the per-file VM and driver state (all asics).
963 * Returns 0 on success, error on failure.
964 */
965int amdgpu_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv)
966{
967 struct amdgpu_device *adev = dev->dev_private;
968 struct amdgpu_fpriv *fpriv;
969 int r, pasid;
970
971 /* Ensure IB tests are run on ring */
972 flush_delayed_work(&adev->delayed_init_work);
973
974 file_priv->driver_priv = NULL;
975
976 r = pm_runtime_get_sync(dev->dev);
977 if (r < 0)
978 return r;
979
980 fpriv = kzalloc(sizeof(*fpriv), GFP_KERNEL);
981 if (unlikely(!fpriv)) {
982 r = -ENOMEM;
983 goto out_suspend;
984 }
985
986 pasid = amdgpu_pasid_alloc(16);
987 if (pasid < 0) {
988		dev_warn(adev->dev, "No more PASIDs available!\n");
989 pasid = 0;
990 }
991 r = amdgpu_vm_init(adev, &fpriv->vm, AMDGPU_VM_CONTEXT_GFX, pasid);
992 if (r)
993 goto error_pasid;
994
995 fpriv->prt_va = amdgpu_vm_bo_add(adev, &fpriv->vm, NULL);
996 if (!fpriv->prt_va) {
997 r = -ENOMEM;
998 goto error_vm;
999 }
1000
1001 if (amdgpu_mcbp || amdgpu_sriov_vf(adev)) {
1002 uint64_t csa_addr = amdgpu_csa_vaddr(adev) & AMDGPU_GMC_HOLE_MASK;
1003
1004 r = amdgpu_map_static_csa(adev, &fpriv->vm, adev->virt.csa_obj,
1005 &fpriv->csa_va, csa_addr, AMDGPU_CSA_SIZE);
1006 if (r)
1007 goto error_vm;
1008 }
1009
1010 mutex_init(&fpriv->bo_list_lock);
1011 idr_init(&fpriv->bo_list_handles);
1012
1013 amdgpu_ctx_mgr_init(&fpriv->ctx_mgr);
1014
1015 file_priv->driver_priv = fpriv;
1016 goto out_suspend;
1017
1018error_vm:
1019 amdgpu_vm_fini(adev, &fpriv->vm);
1020
1021error_pasid:
1022 if (pasid)
1023 amdgpu_pasid_free(pasid);
1024
1025 kfree(fpriv);
1026
1027out_suspend:
1028 pm_runtime_mark_last_busy(dev->dev);
1029 pm_runtime_put_autosuspend(dev->dev);
1030
1031 return r;
1032}
1033
1034/**
1035 * amdgpu_driver_postclose_kms - drm callback for post close
1036 *
1037 * @dev: drm dev pointer
1038 * @file_priv: drm file
1039 *
1040 * On device post close, tear down the per-file VM and driver state (all asics).
1041 */
1042void amdgpu_driver_postclose_kms(struct drm_device *dev,
1043 struct drm_file *file_priv)
1044{
1045 struct amdgpu_device *adev = dev->dev_private;
1046 struct amdgpu_fpriv *fpriv = file_priv->driver_priv;
1047 struct amdgpu_bo_list *list;
1048 struct amdgpu_bo *pd;
1049 unsigned int pasid;
1050 int handle;
1051
1052 if (!fpriv)
1053 return;
1054
1055 pm_runtime_get_sync(dev->dev);
1056
1057 if (amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_UVD) != NULL)
1058 amdgpu_uvd_free_handles(adev, file_priv);
1059 if (amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_VCE) != NULL)
1060 amdgpu_vce_free_handles(adev, file_priv);
1061
1062 amdgpu_vm_bo_rmv(adev, fpriv->prt_va);
1063
1064 if (amdgpu_mcbp || amdgpu_sriov_vf(adev)) {
1065 /* TODO: how to handle reserve failure */
1066 BUG_ON(amdgpu_bo_reserve(adev->virt.csa_obj, true));
1067 amdgpu_vm_bo_rmv(adev, fpriv->csa_va);
1068 fpriv->csa_va = NULL;
1069 amdgpu_bo_unreserve(adev->virt.csa_obj);
1070 }
1071
1072 pasid = fpriv->vm.pasid;
1073 pd = amdgpu_bo_ref(fpriv->vm.root.base.bo);
1074
1075 amdgpu_ctx_mgr_fini(&fpriv->ctx_mgr);
1076 amdgpu_vm_fini(adev, &fpriv->vm);
1077
1078 if (pasid)
1079 amdgpu_pasid_free_delayed(pd->tbo.base.resv, pasid);
1080 amdgpu_bo_unref(&pd);
1081
1082 idr_for_each_entry(&fpriv->bo_list_handles, list, handle)
1083 amdgpu_bo_list_put(list);
1084
1085 idr_destroy(&fpriv->bo_list_handles);
1086 mutex_destroy(&fpriv->bo_list_lock);
1087
1088 kfree(fpriv);
1089 file_priv->driver_priv = NULL;
1090
1091 pm_runtime_mark_last_busy(dev->dev);
1092 pm_runtime_put_autosuspend(dev->dev);
1093}
1094
1095/*
1096 * VBlank related functions.
1097 */
1098/**
1099 * amdgpu_get_vblank_counter_kms - get frame count
1100 *
1101 * @dev: drm dev pointer
1102 * @pipe: crtc to get the frame count from
1103 *
1104 * Gets the frame count on the requested crtc (all asics).
1105 * Returns frame count on success, -EINVAL on failure.
1106 */
1107u32 amdgpu_get_vblank_counter_kms(struct drm_device *dev, unsigned int pipe)
1108{
1109 struct amdgpu_device *adev = dev->dev_private;
1110 int vpos, hpos, stat;
1111 u32 count;
1112
1113 if (pipe >= adev->mode_info.num_crtc) {
1114 DRM_ERROR("Invalid crtc %u\n", pipe);
1115 return -EINVAL;
1116 }
1117
1118 /* The hw increments its frame counter at start of vsync, not at start
1119 * of vblank, as is required by DRM core vblank counter handling.
1120 * Cook the hw count here to make it appear to the caller as if it
1121 * incremented at start of vblank. We measure distance to start of
1122 * vblank in vpos. vpos therefore will be >= 0 between start of vblank
1123 * and start of vsync, so vpos >= 0 means to bump the hw frame counter
1124 * result by 1 to give the proper appearance to caller.
1125 */
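	/*
	 * Worked example: if the raw hw counter reads 100 and the query below
	 * finds the scanout between start of vblank and start of vsync
	 * (vpos >= 0), 101 is reported to DRM core; once vsync begins, vpos
	 * goes negative and the hw counter itself has advanced to 101, so no
	 * adjustment is applied.
	 */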
1126 if (adev->mode_info.crtcs[pipe]) {
1127 /* Repeat readout if needed to provide stable result if
1128 * we cross start of vsync during the queries.
1129 */
1130 do {
1131 count = amdgpu_display_vblank_get_counter(adev, pipe);
1132 /* Ask amdgpu_display_get_crtc_scanoutpos to return
1133 * vpos as distance to start of vblank, instead of
1134 * regular vertical scanout pos.
1135 */
1136 stat = amdgpu_display_get_crtc_scanoutpos(
1137 dev, pipe, GET_DISTANCE_TO_VBLANKSTART,
1138 &vpos, &hpos, NULL, NULL,
1139 &adev->mode_info.crtcs[pipe]->base.hwmode);
1140 } while (count != amdgpu_display_vblank_get_counter(adev, pipe));
1141
1142 if (((stat & (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_ACCURATE)) !=
1143 (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_ACCURATE))) {
1144 DRM_DEBUG_VBL("Query failed! stat %d\n", stat);
1145 } else {
1146 DRM_DEBUG_VBL("crtc %d: dist from vblank start %d\n",
1147 pipe, vpos);
1148
1149 /* Bump counter if we are at >= leading edge of vblank,
1150 * but before vsync where vpos would turn negative and
1151 * the hw counter really increments.
1152 */
1153 if (vpos >= 0)
1154 count++;
1155 }
1156 } else {
1157 /* Fallback to use value as is. */
1158 count = amdgpu_display_vblank_get_counter(adev, pipe);
1159 DRM_DEBUG_VBL("NULL mode info! Returned count may be wrong.\n");
1160 }
1161
1162 return count;
1163}
1164
1165/**
1166 * amdgpu_enable_vblank_kms - enable vblank interrupt
1167 *
1168 * @dev: drm dev pointer
1169 * @pipe: crtc to enable vblank interrupt for
1170 *
1171 * Enable the interrupt on the requested crtc (all asics).
1172 * Returns 0 on success, -EINVAL on failure.
1173 */
1174int amdgpu_enable_vblank_kms(struct drm_device *dev, unsigned int pipe)
1175{
1176 struct amdgpu_device *adev = dev->dev_private;
1177 int idx = amdgpu_display_crtc_idx_to_irq_type(adev, pipe);
1178
1179 return amdgpu_irq_get(adev, &adev->crtc_irq, idx);
1180}
1181
1182/**
1183 * amdgpu_disable_vblank_kms - disable vblank interrupt
1184 *
1185 * @dev: drm dev pointer
1186 * @pipe: crtc to disable vblank interrupt for
1187 *
1188 * Disable the interrupt on the requested crtc (all asics).
1189 */
1190void amdgpu_disable_vblank_kms(struct drm_device *dev, unsigned int pipe)
1191{
1192 struct amdgpu_device *adev = dev->dev_private;
1193 int idx = amdgpu_display_crtc_idx_to_irq_type(adev, pipe);
1194
1195 amdgpu_irq_put(adev, &adev->crtc_irq, idx);
1196}
1197
1198const struct drm_ioctl_desc amdgpu_ioctls_kms[] = {
1199 DRM_IOCTL_DEF_DRV(AMDGPU_GEM_CREATE, amdgpu_gem_create_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
1200 DRM_IOCTL_DEF_DRV(AMDGPU_CTX, amdgpu_ctx_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
1201 DRM_IOCTL_DEF_DRV(AMDGPU_VM, amdgpu_vm_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
1202 DRM_IOCTL_DEF_DRV(AMDGPU_SCHED, amdgpu_sched_ioctl, DRM_MASTER),
1203 DRM_IOCTL_DEF_DRV(AMDGPU_BO_LIST, amdgpu_bo_list_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
1204 DRM_IOCTL_DEF_DRV(AMDGPU_FENCE_TO_HANDLE, amdgpu_cs_fence_to_handle_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
1205 /* KMS */
1206 DRM_IOCTL_DEF_DRV(AMDGPU_GEM_MMAP, amdgpu_gem_mmap_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
1207 DRM_IOCTL_DEF_DRV(AMDGPU_GEM_WAIT_IDLE, amdgpu_gem_wait_idle_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
1208 DRM_IOCTL_DEF_DRV(AMDGPU_CS, amdgpu_cs_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
1209 DRM_IOCTL_DEF_DRV(AMDGPU_INFO, amdgpu_info_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
1210 DRM_IOCTL_DEF_DRV(AMDGPU_WAIT_CS, amdgpu_cs_wait_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
1211 DRM_IOCTL_DEF_DRV(AMDGPU_WAIT_FENCES, amdgpu_cs_wait_fences_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
1212 DRM_IOCTL_DEF_DRV(AMDGPU_GEM_METADATA, amdgpu_gem_metadata_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
1213 DRM_IOCTL_DEF_DRV(AMDGPU_GEM_VA, amdgpu_gem_va_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
1214 DRM_IOCTL_DEF_DRV(AMDGPU_GEM_OP, amdgpu_gem_op_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
1215 DRM_IOCTL_DEF_DRV(AMDGPU_GEM_USERPTR, amdgpu_gem_userptr_ioctl, DRM_AUTH|DRM_RENDER_ALLOW)
1216};
1217const int amdgpu_max_kms_ioctl = ARRAY_SIZE(amdgpu_ioctls_kms);
1218
1219/*
1220 * Debugfs info
1221 */
1222#if defined(CONFIG_DEBUG_FS)
1223
1224static int amdgpu_debugfs_firmware_info(struct seq_file *m, void *data)
1225{
1226 struct drm_info_node *node = (struct drm_info_node *) m->private;
1227 struct drm_device *dev = node->minor->dev;
1228 struct amdgpu_device *adev = dev->dev_private;
1229 struct drm_amdgpu_info_firmware fw_info;
1230 struct drm_amdgpu_query_fw query_fw;
1231 struct atom_context *ctx = adev->mode_info.atom_context;
1232 int ret, i;
1233
1234 /* VCE */
1235 query_fw.fw_type = AMDGPU_INFO_FW_VCE;
1236 ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
1237 if (ret)
1238 return ret;
1239 seq_printf(m, "VCE feature version: %u, firmware version: 0x%08x\n",
1240 fw_info.feature, fw_info.ver);
1241
1242 /* UVD */
1243 query_fw.fw_type = AMDGPU_INFO_FW_UVD;
1244 ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
1245 if (ret)
1246 return ret;
1247 seq_printf(m, "UVD feature version: %u, firmware version: 0x%08x\n",
1248 fw_info.feature, fw_info.ver);
1249
1250 /* GMC */
1251 query_fw.fw_type = AMDGPU_INFO_FW_GMC;
1252 ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
1253 if (ret)
1254 return ret;
1255 seq_printf(m, "MC feature version: %u, firmware version: 0x%08x\n",
1256 fw_info.feature, fw_info.ver);
1257
1258 /* ME */
1259 query_fw.fw_type = AMDGPU_INFO_FW_GFX_ME;
1260 ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
1261 if (ret)
1262 return ret;
1263 seq_printf(m, "ME feature version: %u, firmware version: 0x%08x\n",
1264 fw_info.feature, fw_info.ver);
1265
1266 /* PFP */
1267 query_fw.fw_type = AMDGPU_INFO_FW_GFX_PFP;
1268 ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
1269 if (ret)
1270 return ret;
1271 seq_printf(m, "PFP feature version: %u, firmware version: 0x%08x\n",
1272 fw_info.feature, fw_info.ver);
1273
1274 /* CE */
1275 query_fw.fw_type = AMDGPU_INFO_FW_GFX_CE;
1276 ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
1277 if (ret)
1278 return ret;
1279 seq_printf(m, "CE feature version: %u, firmware version: 0x%08x\n",
1280 fw_info.feature, fw_info.ver);
1281
1282 /* RLC */
1283 query_fw.fw_type = AMDGPU_INFO_FW_GFX_RLC;
1284 ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
1285 if (ret)
1286 return ret;
1287 seq_printf(m, "RLC feature version: %u, firmware version: 0x%08x\n",
1288 fw_info.feature, fw_info.ver);
1289
1290 /* RLC SAVE RESTORE LIST CNTL */
1291 query_fw.fw_type = AMDGPU_INFO_FW_GFX_RLC_RESTORE_LIST_CNTL;
1292 ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
1293 if (ret)
1294 return ret;
1295 seq_printf(m, "RLC SRLC feature version: %u, firmware version: 0x%08x\n",
1296 fw_info.feature, fw_info.ver);
1297
1298 /* RLC SAVE RESTORE LIST GPM MEM */
1299 query_fw.fw_type = AMDGPU_INFO_FW_GFX_RLC_RESTORE_LIST_GPM_MEM;
1300 ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
1301 if (ret)
1302 return ret;
1303 seq_printf(m, "RLC SRLG feature version: %u, firmware version: 0x%08x\n",
1304 fw_info.feature, fw_info.ver);
1305
1306 /* RLC SAVE RESTORE LIST SRM MEM */
1307 query_fw.fw_type = AMDGPU_INFO_FW_GFX_RLC_RESTORE_LIST_SRM_MEM;
1308 ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
1309 if (ret)
1310 return ret;
1311 seq_printf(m, "RLC SRLS feature version: %u, firmware version: 0x%08x\n",
1312 fw_info.feature, fw_info.ver);
1313
1314 /* MEC */
1315 query_fw.fw_type = AMDGPU_INFO_FW_GFX_MEC;
1316 query_fw.index = 0;
1317 ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
1318 if (ret)
1319 return ret;
1320 seq_printf(m, "MEC feature version: %u, firmware version: 0x%08x\n",
1321 fw_info.feature, fw_info.ver);
1322
1323 /* MEC2 */
1324 if (adev->asic_type == CHIP_KAVERI ||
1325 (adev->asic_type > CHIP_TOPAZ && adev->asic_type != CHIP_STONEY)) {
1326 query_fw.index = 1;
1327 ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
1328 if (ret)
1329 return ret;
1330 seq_printf(m, "MEC2 feature version: %u, firmware version: 0x%08x\n",
1331 fw_info.feature, fw_info.ver);
1332 }
1333
1334 /* PSP SOS */
1335 query_fw.fw_type = AMDGPU_INFO_FW_SOS;
1336 ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
1337 if (ret)
1338 return ret;
1339 seq_printf(m, "SOS feature version: %u, firmware version: 0x%08x\n",
1340 fw_info.feature, fw_info.ver);
1341
1342
1343 /* PSP ASD */
1344 query_fw.fw_type = AMDGPU_INFO_FW_ASD;
1345 ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
1346 if (ret)
1347 return ret;
1348 seq_printf(m, "ASD feature version: %u, firmware version: 0x%08x\n",
1349 fw_info.feature, fw_info.ver);
1350
1351 query_fw.fw_type = AMDGPU_INFO_FW_TA;
1352 for (i = 0; i < 2; i++) {
1353 query_fw.index = i;
1354 ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
1355 if (ret)
1356 continue;
1357 seq_printf(m, "TA %s feature version: %u, firmware version: 0x%08x\n",
1358 i ? "RAS" : "XGMI", fw_info.feature, fw_info.ver);
1359 }
1360
1361 /* SMC */
1362 query_fw.fw_type = AMDGPU_INFO_FW_SMC;
1363 ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
1364 if (ret)
1365 return ret;
1366 seq_printf(m, "SMC feature version: %u, firmware version: 0x%08x\n",
1367 fw_info.feature, fw_info.ver);
1368
1369 /* SDMA */
1370 query_fw.fw_type = AMDGPU_INFO_FW_SDMA;
1371 for (i = 0; i < adev->sdma.num_instances; i++) {
1372 query_fw.index = i;
1373 ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
1374 if (ret)
1375 return ret;
1376 seq_printf(m, "SDMA%d feature version: %u, firmware version: 0x%08x\n",
1377 i, fw_info.feature, fw_info.ver);
1378 }
1379
1380 /* VCN */
1381 query_fw.fw_type = AMDGPU_INFO_FW_VCN;
1382 ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
1383 if (ret)
1384 return ret;
1385 seq_printf(m, "VCN feature version: %u, firmware version: 0x%08x\n",
1386 fw_info.feature, fw_info.ver);
1387
1388 /* DMCU */
1389 query_fw.fw_type = AMDGPU_INFO_FW_DMCU;
1390 ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
1391 if (ret)
1392 return ret;
1393 seq_printf(m, "DMCU feature version: %u, firmware version: 0x%08x\n",
1394 fw_info.feature, fw_info.ver);
1395
1396
1397 seq_printf(m, "VBIOS version: %s\n", ctx->vbios_version);
1398
1399 return 0;
1400}
1401
1402static const struct drm_info_list amdgpu_firmware_info_list[] = {
1403 {"amdgpu_firmware_info", amdgpu_debugfs_firmware_info, 0, NULL},
1404};
1405#endif
1406
1407int amdgpu_debugfs_firmware_init(struct amdgpu_device *adev)
1408{
1409#if defined(CONFIG_DEBUG_FS)
1410 return amdgpu_debugfs_add_files(adev, amdgpu_firmware_info_list,
1411 ARRAY_SIZE(amdgpu_firmware_info_list));
1412#else
1413 return 0;
1414#endif
1415}
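
/*
 * With debugfs mounted, the firmware versions collected above can typically
 * be read from userspace (path assumed; the DRM minor number may differ):
 *
 *	cat /sys/kernel/debug/dri/0/amdgpu_firmware_info
 */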