1/*
2 * Copyright 2008 Advanced Micro Devices, Inc.
3 * Copyright 2008 Red Hat Inc.
4 * Copyright 2009 Jerome Glisse.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the "Software"),
8 * to deal in the Software without restriction, including without limitation
9 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10 * and/or sell copies of the Software, and to permit persons to whom the
11 * Software is furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice shall be included in
14 * all copies or substantial portions of the Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
20 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
21 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
22 * OTHER DEALINGS IN THE SOFTWARE.
23 *
24 */
25
26#include <linux/kthread.h>
27#include <linux/pci.h>
28#include <linux/uaccess.h>
29#include <linux/pm_runtime.h>
30
31#include "amdgpu.h"
32#include "amdgpu_pm.h"
33#include "amdgpu_dm_debugfs.h"
34#include "amdgpu_ras.h"
35#include "amdgpu_rap.h"
36#include "amdgpu_securedisplay.h"
37#include "amdgpu_fw_attestation.h"
38#include "amdgpu_umr.h"
39
40#include "amdgpu_reset.h"
41#include "amdgpu_psp_ta.h"
42
43#if defined(CONFIG_DEBUG_FS)
44
45/**
46 * amdgpu_debugfs_process_reg_op - Handle MMIO register reads/writes
47 *
48 * @read: True if reading
49 * @f: open file handle
50 * @buf: User buffer to write/read to
51 * @size: Number of bytes to write/read
52 * @pos: Offset to seek to
53 *
54 * This debugfs entry assigns special meaning to the offset being sought.
55 * Various bits have different meanings:
56 *
57 * Bit 62: Indicates a GRBM bank switch is needed
58 * Bit 61: Indicates a SRBM bank switch is needed (implies bit 62 is
59 * zero)
60 * Bits 24..33: The SE or ME selector if needed
61 * Bits 34..43: The SH (or SA) or PIPE selector if needed
62 * Bits 44..53: The INSTANCE (or CU/WGP) or QUEUE selector if needed
63 *
64 * Bit 23: Indicates that the PM power gating lock should be held
65 * This is necessary to read registers that might be
66 * unreliable during a power gating transition.
67 *
68 * The lower bits are the BYTE offset of the register to read. This
69 * allows reading multiple registers in a single call and having
70 * the returned size reflect that.
71 */
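/*
 * Illustrative userspace sketch (not part of this file) of the offset
 * encoding above, reading one register behind a GRBM bank switch.  The
 * debugfs path and the se/sh/instance/reg_byte_offset values are
 * assumptions for the example only:
 *
 *	uint64_t off = (1ULL << 62) |			// GRBM bank switch
 *		       ((uint64_t)se << 24) |		// SE selector
 *		       ((uint64_t)sh << 34) |		// SH/SA selector
 *		       ((uint64_t)instance << 44) |	// INSTANCE/CU/WGP selector
 *		       (reg_byte_offset & 0x3FFFFF);	// low bits: register BYTE offset
 *	int fd = open("/sys/kernel/debug/dri/0/amdgpu_regs", O_RDONLY);
 *	uint32_t value;
 *	pread(fd, &value, sizeof(value), off);
 */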
72static int amdgpu_debugfs_process_reg_op(bool read, struct file *f,
73 char __user *buf, size_t size, loff_t *pos)
74{
75 struct amdgpu_device *adev = file_inode(f)->i_private;
76 ssize_t result = 0;
77 int r;
78 bool pm_pg_lock, use_bank, use_ring;
79 unsigned int instance_bank, sh_bank, se_bank, me, pipe, queue, vmid;
80
81 pm_pg_lock = use_bank = use_ring = false;
82 instance_bank = sh_bank = se_bank = me = pipe = queue = vmid = 0;
83
84 if (size & 0x3 || *pos & 0x3 ||
85 ((*pos & (1ULL << 62)) && (*pos & (1ULL << 61))))
86 return -EINVAL;
87
88 /* are we reading registers for which a PG lock is necessary? */
89 pm_pg_lock = (*pos >> 23) & 1;
90
91 if (*pos & (1ULL << 62)) {
92 se_bank = (*pos & GENMASK_ULL(33, 24)) >> 24;
93 sh_bank = (*pos & GENMASK_ULL(43, 34)) >> 34;
94 instance_bank = (*pos & GENMASK_ULL(53, 44)) >> 44;
95
96 if (se_bank == 0x3FF)
97 se_bank = 0xFFFFFFFF;
98 if (sh_bank == 0x3FF)
99 sh_bank = 0xFFFFFFFF;
100 if (instance_bank == 0x3FF)
101 instance_bank = 0xFFFFFFFF;
102 use_bank = true;
103 } else if (*pos & (1ULL << 61)) {
104
105 me = (*pos & GENMASK_ULL(33, 24)) >> 24;
106 pipe = (*pos & GENMASK_ULL(43, 34)) >> 34;
107 queue = (*pos & GENMASK_ULL(53, 44)) >> 44;
108 vmid = (*pos & GENMASK_ULL(58, 54)) >> 54;
109
110 use_ring = true;
111 } else {
112 use_bank = use_ring = false;
113 }
114
115 *pos &= (1UL << 22) - 1;
116
117 r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
118 if (r < 0) {
119 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
120 return r;
121 }
122
123 r = amdgpu_virt_enable_access_debugfs(adev);
124 if (r < 0) {
125 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
126 return r;
127 }
128
129 if (use_bank) {
130 if ((sh_bank != 0xFFFFFFFF && sh_bank >= adev->gfx.config.max_sh_per_se) ||
131 (se_bank != 0xFFFFFFFF && se_bank >= adev->gfx.config.max_shader_engines)) {
132 pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
133 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
134 amdgpu_virt_disable_access_debugfs(adev);
135 return -EINVAL;
136 }
137 mutex_lock(&adev->grbm_idx_mutex);
138 amdgpu_gfx_select_se_sh(adev, se_bank,
139 sh_bank, instance_bank, 0);
140 } else if (use_ring) {
141 mutex_lock(&adev->srbm_mutex);
142 amdgpu_gfx_select_me_pipe_q(adev, me, pipe, queue, vmid, 0);
143 }
144
145 if (pm_pg_lock)
146 mutex_lock(&adev->pm.mutex);
147
148 while (size) {
149 uint32_t value;
150
151 if (read) {
152 value = RREG32(*pos >> 2);
153 r = put_user(value, (uint32_t *)buf);
154 } else {
155 r = get_user(value, (uint32_t *)buf);
156 if (!r)
157 amdgpu_mm_wreg_mmio_rlc(adev, *pos >> 2, value, 0);
158 }
159 if (r) {
160 result = r;
161 goto end;
162 }
163
164 result += 4;
165 buf += 4;
166 *pos += 4;
167 size -= 4;
168 }
169
170end:
171 if (use_bank) {
172 amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff, 0);
173 mutex_unlock(&adev->grbm_idx_mutex);
174 } else if (use_ring) {
175 amdgpu_gfx_select_me_pipe_q(adev, 0, 0, 0, 0, 0);
176 mutex_unlock(&adev->srbm_mutex);
177 }
178
179 if (pm_pg_lock)
180 mutex_unlock(&adev->pm.mutex);
181
182 pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
183 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
184
185 amdgpu_virt_disable_access_debugfs(adev);
186 return result;
187}
188
189/*
190 * amdgpu_debugfs_regs_read - Callback for reading MMIO registers
191 */
192static ssize_t amdgpu_debugfs_regs_read(struct file *f, char __user *buf,
193 size_t size, loff_t *pos)
194{
195 return amdgpu_debugfs_process_reg_op(true, f, buf, size, pos);
196}
197
198/*
199 * amdgpu_debugfs_regs_write - Callback for writing MMIO registers
200 */
201static ssize_t amdgpu_debugfs_regs_write(struct file *f, const char __user *buf,
202 size_t size, loff_t *pos)
203{
204 return amdgpu_debugfs_process_reg_op(false, f, (char __user *)buf, size, pos);
205}
206
207static int amdgpu_debugfs_regs2_open(struct inode *inode, struct file *file)
208{
209 struct amdgpu_debugfs_regs2_data *rd;
210
211 rd = kzalloc(sizeof(*rd), GFP_KERNEL);
212 if (!rd)
213 return -ENOMEM;
214 rd->adev = file_inode(file)->i_private;
215 file->private_data = rd;
216 mutex_init(&rd->lock);
217
218 return 0;
219}
220
221static int amdgpu_debugfs_regs2_release(struct inode *inode, struct file *file)
222{
223 struct amdgpu_debugfs_regs2_data *rd = file->private_data;
224
225 mutex_destroy(&rd->lock);
226 kfree(file->private_data);
227 return 0;
228}
229
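/*
 * amdgpu_debugfs_regs2_op - read/write MMIO registers for amdgpu_regs2.
 *
 * Unlike the legacy amdgpu_regs entry, the GRBM/SRBM bank selection and the
 * PG lock are not encoded in the file offset; they are programmed up front
 * through the SET_STATE ioctls below and cached in rd->id.  A hedged
 * userspace sketch (the field values are assumptions for the example):
 *
 *	struct amdgpu_debugfs_regs2_iocdata_v2 id = { 0 };
 *	id.use_grbm = 1;
 *	id.grbm.se = id.grbm.sh = id.grbm.instance = 0xFFFFFFFF;	// broadcast
 *	ioctl(fd, AMDGPU_DEBUGFS_REGS2_IOC_SET_STATE_V2, &id);
 *	pread(fd, &value, 4, reg_byte_offset);
 */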
230static ssize_t amdgpu_debugfs_regs2_op(struct file *f, char __user *buf, u32 offset, size_t size, int write_en)
231{
232 struct amdgpu_debugfs_regs2_data *rd = f->private_data;
233 struct amdgpu_device *adev = rd->adev;
234 ssize_t result = 0;
235 int r;
236 uint32_t value;
237
238 if (size & 0x3 || offset & 0x3)
239 return -EINVAL;
240
241 r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
242 if (r < 0) {
243 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
244 return r;
245 }
246
247 r = amdgpu_virt_enable_access_debugfs(adev);
248 if (r < 0) {
249 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
250 return r;
251 }
252
253 mutex_lock(&rd->lock);
254
255 if (rd->id.use_grbm) {
256 if ((rd->id.grbm.sh != 0xFFFFFFFF && rd->id.grbm.sh >= adev->gfx.config.max_sh_per_se) ||
257 (rd->id.grbm.se != 0xFFFFFFFF && rd->id.grbm.se >= adev->gfx.config.max_shader_engines)) {
258 pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
259 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
260 amdgpu_virt_disable_access_debugfs(adev);
261 mutex_unlock(&rd->lock);
262 return -EINVAL;
263 }
264 mutex_lock(&adev->grbm_idx_mutex);
265 amdgpu_gfx_select_se_sh(adev, rd->id.grbm.se,
266 rd->id.grbm.sh,
267 rd->id.grbm.instance, rd->id.xcc_id);
268 }
269
270 if (rd->id.use_srbm) {
271 mutex_lock(&adev->srbm_mutex);
272 amdgpu_gfx_select_me_pipe_q(adev, rd->id.srbm.me, rd->id.srbm.pipe,
273 rd->id.srbm.queue, rd->id.srbm.vmid, rd->id.xcc_id);
274 }
275
276 if (rd->id.pg_lock)
277 mutex_lock(&adev->pm.mutex);
278
279 while (size) {
280 if (!write_en) {
281 value = RREG32(offset >> 2);
282 r = put_user(value, (uint32_t *)buf);
283 } else {
284 r = get_user(value, (uint32_t *)buf);
285 if (!r)
286 amdgpu_mm_wreg_mmio_rlc(adev, offset >> 2, value, rd->id.xcc_id);
287 }
288 if (r) {
289 result = r;
290 goto end;
291 }
292 offset += 4;
293 size -= 4;
294 result += 4;
295 buf += 4;
296 }
297end:
298 if (rd->id.use_grbm) {
299 amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff, rd->id.xcc_id);
300 mutex_unlock(&adev->grbm_idx_mutex);
301 }
302
303 if (rd->id.use_srbm) {
304 amdgpu_gfx_select_me_pipe_q(adev, 0, 0, 0, 0, rd->id.xcc_id);
305 mutex_unlock(&adev->srbm_mutex);
306 }
307
308 if (rd->id.pg_lock)
309 mutex_unlock(&adev->pm.mutex);
310
311 mutex_unlock(&rd->lock);
312
313 pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
314 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
315
316 amdgpu_virt_disable_access_debugfs(adev);
317 return result;
318}
319
320static long amdgpu_debugfs_regs2_ioctl(struct file *f, unsigned int cmd, unsigned long data)
321{
322 struct amdgpu_debugfs_regs2_data *rd = f->private_data;
323 struct amdgpu_debugfs_regs2_iocdata v1_data;
324 int r;
325
326 mutex_lock(&rd->lock);
327
328 switch (cmd) {
329 case AMDGPU_DEBUGFS_REGS2_IOC_SET_STATE_V2:
330 r = copy_from_user(&rd->id, (struct amdgpu_debugfs_regs2_iocdata_v2 *)data,
331 sizeof(rd->id));
332 if (r)
333 r = -EINVAL;
334 goto done;
335 case AMDGPU_DEBUGFS_REGS2_IOC_SET_STATE:
336 r = copy_from_user(&v1_data, (struct amdgpu_debugfs_regs2_iocdata *)data,
337 sizeof(v1_data));
338 if (r) {
339 r = -EINVAL;
340 goto done;
341 }
342 goto v1_copy;
343 default:
344 r = -EINVAL;
345 goto done;
346 }
347
348v1_copy:
349 rd->id.use_srbm = v1_data.use_srbm;
350 rd->id.use_grbm = v1_data.use_grbm;
351 rd->id.pg_lock = v1_data.pg_lock;
352 rd->id.grbm.se = v1_data.grbm.se;
353 rd->id.grbm.sh = v1_data.grbm.sh;
354 rd->id.grbm.instance = v1_data.grbm.instance;
355 rd->id.srbm.me = v1_data.srbm.me;
356 rd->id.srbm.pipe = v1_data.srbm.pipe;
357 rd->id.srbm.queue = v1_data.srbm.queue;
358 rd->id.xcc_id = 0;
359done:
360 mutex_unlock(&rd->lock);
361 return r;
362}
363
364static ssize_t amdgpu_debugfs_regs2_read(struct file *f, char __user *buf, size_t size, loff_t *pos)
365{
366 return amdgpu_debugfs_regs2_op(f, buf, *pos, size, 0);
367}
368
369static ssize_t amdgpu_debugfs_regs2_write(struct file *f, const char __user *buf, size_t size, loff_t *pos)
370{
371 return amdgpu_debugfs_regs2_op(f, (char __user *)buf, *pos, size, 1);
372}
373
374static int amdgpu_debugfs_gprwave_open(struct inode *inode, struct file *file)
375{
376 struct amdgpu_debugfs_gprwave_data *rd;
377
378 rd = kzalloc(sizeof(*rd), GFP_KERNEL);
379 if (!rd)
380 return -ENOMEM;
381 rd->adev = file_inode(file)->i_private;
382 file->private_data = rd;
383 mutex_init(&rd->lock);
384
385 return 0;
386}
387
388static int amdgpu_debugfs_gprwave_release(struct inode *inode, struct file *file)
389{
390 struct amdgpu_debugfs_gprwave_data *rd = file->private_data;
391
392 mutex_destroy(&rd->lock);
393 kfree(file->private_data);
394 return 0;
395}
396
397static ssize_t amdgpu_debugfs_gprwave_read(struct file *f, char __user *buf, size_t size, loff_t *pos)
398{
399 struct amdgpu_debugfs_gprwave_data *rd = f->private_data;
400 struct amdgpu_device *adev = rd->adev;
401 ssize_t result = 0;
402 int r;
403 uint32_t *data, x;
404
405 if (size & 0x3 || *pos & 0x3)
406 return -EINVAL;
407
408 r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
409 if (r < 0) {
410 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
411 return r;
412 }
413
414 r = amdgpu_virt_enable_access_debugfs(adev);
415 if (r < 0) {
416 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
417 return r;
418 }
419
420 data = kcalloc(1024, sizeof(*data), GFP_KERNEL);
421 if (!data) {
422 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
423 amdgpu_virt_disable_access_debugfs(adev);
424 return -ENOMEM;
425 }
426
427 /* switch to the specific se/sh/cu */
428 mutex_lock(&adev->grbm_idx_mutex);
429 amdgpu_gfx_select_se_sh(adev, rd->id.se, rd->id.sh, rd->id.cu, rd->id.xcc_id);
430
431 if (!rd->id.gpr_or_wave) {
432 x = 0;
433 if (adev->gfx.funcs->read_wave_data)
434 adev->gfx.funcs->read_wave_data(adev, rd->id.xcc_id, rd->id.simd, rd->id.wave, data, &x);
435 } else {
436 x = size >> 2;
437 if (rd->id.gpr.vpgr_or_sgpr) {
438 if (adev->gfx.funcs->read_wave_vgprs)
439 adev->gfx.funcs->read_wave_vgprs(adev, rd->id.xcc_id, rd->id.simd, rd->id.wave, rd->id.gpr.thread, *pos, size>>2, data);
440 } else {
441 if (adev->gfx.funcs->read_wave_sgprs)
442 adev->gfx.funcs->read_wave_sgprs(adev, rd->id.xcc_id, rd->id.simd, rd->id.wave, *pos, size>>2, data);
443 }
444 }
445
446 amdgpu_gfx_select_se_sh(adev, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, rd->id.xcc_id);
447 mutex_unlock(&adev->grbm_idx_mutex);
448
449 pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
450 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
451
452 if (!x) {
453 result = -EINVAL;
454 goto done;
455 }
456
457 while (size && (*pos < x * 4)) {
458 uint32_t value;
459
460 value = data[*pos >> 2];
461 r = put_user(value, (uint32_t *)buf);
462 if (r) {
463 result = r;
464 goto done;
465 }
466
467 result += 4;
468 buf += 4;
469 *pos += 4;
470 size -= 4;
471 }
472
473done:
474 amdgpu_virt_disable_access_debugfs(adev);
475 kfree(data);
476 return result;
477}
478
479static long amdgpu_debugfs_gprwave_ioctl(struct file *f, unsigned int cmd, unsigned long data)
480{
481 struct amdgpu_debugfs_gprwave_data *rd = f->private_data;
482 int r = 0;
483
484 mutex_lock(&rd->lock);
485
486 switch (cmd) {
487 case AMDGPU_DEBUGFS_GPRWAVE_IOC_SET_STATE:
488 if (copy_from_user(&rd->id,
489 (struct amdgpu_debugfs_gprwave_iocdata *)data,
490 sizeof(rd->id)))
491 r = -EFAULT;
492 goto done;
493 default:
494 r = -EINVAL;
495 goto done;
496 }
497
498done:
499 mutex_unlock(&rd->lock);
500 return r;
501}
502
503
504
505
506/**
507 * amdgpu_debugfs_regs_pcie_read - Read from a PCIE register
508 *
509 * @f: open file handle
510 * @buf: User buffer to store read data in
511 * @size: Number of bytes to read
512 * @pos: Offset to seek to
513 *
514 * The lower bits are the BYTE offset of the register to read. This
515 * allows reading multiple registers in a single call and having
516 * the returned size reflect that.
517 */
518static ssize_t amdgpu_debugfs_regs_pcie_read(struct file *f, char __user *buf,
519 size_t size, loff_t *pos)
520{
521 struct amdgpu_device *adev = file_inode(f)->i_private;
522 ssize_t result = 0;
523 int r;
524
525 if (size & 0x3 || *pos & 0x3)
526 return -EINVAL;
527
528 r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
529 if (r < 0) {
530 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
531 return r;
532 }
533
534 r = amdgpu_virt_enable_access_debugfs(adev);
535 if (r < 0) {
536 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
537 return r;
538 }
539
540 while (size) {
541 uint32_t value;
542
543 if (upper_32_bits(*pos))
544 value = RREG32_PCIE_EXT(*pos);
545 else
546 value = RREG32_PCIE(*pos);
547
548 r = put_user(value, (uint32_t *)buf);
549 if (r)
550 goto out;
551
552 result += 4;
553 buf += 4;
554 *pos += 4;
555 size -= 4;
556 }
557
558 r = result;
559out:
560 pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
561 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
562 amdgpu_virt_disable_access_debugfs(adev);
563 return r;
564}
565
566/**
567 * amdgpu_debugfs_regs_pcie_write - Write to a PCIE register
568 *
569 * @f: open file handle
570 * @buf: User buffer to write data from
571 * @size: Number of bytes to write
572 * @pos: Offset to seek to
573 *
574 * The lower bits are the BYTE offset of the register to write. This
575 * allows writing multiple registers in a single call and having
576 * the returned size reflect that.
577 */
578static ssize_t amdgpu_debugfs_regs_pcie_write(struct file *f, const char __user *buf,
579 size_t size, loff_t *pos)
580{
581 struct amdgpu_device *adev = file_inode(f)->i_private;
582 ssize_t result = 0;
583 int r;
584
585 if (size & 0x3 || *pos & 0x3)
586 return -EINVAL;
587
588 r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
589 if (r < 0) {
590 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
591 return r;
592 }
593
594 r = amdgpu_virt_enable_access_debugfs(adev);
595 if (r < 0) {
596 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
597 return r;
598 }
599
600 while (size) {
601 uint32_t value;
602
603 r = get_user(value, (uint32_t *)buf);
604 if (r)
605 goto out;
606
607 if (upper_32_bits(*pos))
608 WREG32_PCIE_EXT(*pos, value);
609 else
610 WREG32_PCIE(*pos, value);
611
612 result += 4;
613 buf += 4;
614 *pos += 4;
615 size -= 4;
616 }
617
618 r = result;
619out:
620 pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
621 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
622 amdgpu_virt_disable_access_debugfs(adev);
623 return r;
624}
625
626/**
627 * amdgpu_debugfs_regs_didt_read - Read from a DIDT register
628 *
629 * @f: open file handle
630 * @buf: User buffer to store read data in
631 * @size: Number of bytes to read
632 * @pos: Offset to seek to
633 *
634 * The lower bits are the BYTE offset of the register to read. This
635 * allows reading multiple registers in a single call and having
636 * the returned size reflect that.
637 */
638static ssize_t amdgpu_debugfs_regs_didt_read(struct file *f, char __user *buf,
639 size_t size, loff_t *pos)
640{
641 struct amdgpu_device *adev = file_inode(f)->i_private;
642 ssize_t result = 0;
643 int r;
644
645 if (size & 0x3 || *pos & 0x3)
646 return -EINVAL;
647
648 if (!adev->didt_rreg)
649 return -EOPNOTSUPP;
650
651 r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
652 if (r < 0) {
653 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
654 return r;
655 }
656
657 r = amdgpu_virt_enable_access_debugfs(adev);
658 if (r < 0) {
659 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
660 return r;
661 }
662
663 while (size) {
664 uint32_t value;
665
666 value = RREG32_DIDT(*pos >> 2);
667 r = put_user(value, (uint32_t *)buf);
668 if (r)
669 goto out;
670
671 result += 4;
672 buf += 4;
673 *pos += 4;
674 size -= 4;
675 }
676
677 r = result;
678out:
679 pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
680 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
681 amdgpu_virt_disable_access_debugfs(adev);
682 return r;
683}
684
685/**
686 * amdgpu_debugfs_regs_didt_write - Write to a DIDT register
687 *
688 * @f: open file handle
689 * @buf: User buffer to write data from
690 * @size: Number of bytes to write
691 * @pos: Offset to seek to
692 *
693 * The lower bits are the BYTE offset of the register to write. This
694 * allows writing multiple registers in a single call and having
695 * the returned size reflect that.
696 */
697static ssize_t amdgpu_debugfs_regs_didt_write(struct file *f, const char __user *buf,
698 size_t size, loff_t *pos)
699{
700 struct amdgpu_device *adev = file_inode(f)->i_private;
701 ssize_t result = 0;
702 int r;
703
704 if (size & 0x3 || *pos & 0x3)
705 return -EINVAL;
706
707 if (!adev->didt_wreg)
708 return -EOPNOTSUPP;
709
710 r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
711 if (r < 0) {
712 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
713 return r;
714 }
715
716 r = amdgpu_virt_enable_access_debugfs(adev);
717 if (r < 0) {
718 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
719 return r;
720 }
721
722 while (size) {
723 uint32_t value;
724
725 r = get_user(value, (uint32_t *)buf);
726 if (r)
727 goto out;
728
729 WREG32_DIDT(*pos >> 2, value);
730
731 result += 4;
732 buf += 4;
733 *pos += 4;
734 size -= 4;
735 }
736
737 r = result;
738out:
739 pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
740 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
741 amdgpu_virt_disable_access_debugfs(adev);
742 return r;
743}
744
745/**
746 * amdgpu_debugfs_regs_smc_read - Read from a SMC register
747 *
748 * @f: open file handle
749 * @buf: User buffer to store read data in
750 * @size: Number of bytes to read
751 * @pos: Offset to seek to
752 *
753 * The lower bits are the BYTE offset of the register to read. This
754 * allows reading multiple registers in a single call and having
755 * the returned size reflect that.
756 */
757static ssize_t amdgpu_debugfs_regs_smc_read(struct file *f, char __user *buf,
758 size_t size, loff_t *pos)
759{
760 struct amdgpu_device *adev = file_inode(f)->i_private;
761 ssize_t result = 0;
762 int r;
763
764 if (!adev->smc_rreg)
765 return -EOPNOTSUPP;
766
767 if (size & 0x3 || *pos & 0x3)
768 return -EINVAL;
769
770 r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
771 if (r < 0) {
772 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
773 return r;
774 }
775
776 r = amdgpu_virt_enable_access_debugfs(adev);
777 if (r < 0) {
778 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
779 return r;
780 }
781
782 while (size) {
783 uint32_t value;
784
785 value = RREG32_SMC(*pos);
786 r = put_user(value, (uint32_t *)buf);
787 if (r)
788 goto out;
789
790 result += 4;
791 buf += 4;
792 *pos += 4;
793 size -= 4;
794 }
795
796 r = result;
797out:
798 pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
799 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
800 amdgpu_virt_disable_access_debugfs(adev);
801 return r;
802}
803
804/**
805 * amdgpu_debugfs_regs_smc_write - Write to a SMC register
806 *
807 * @f: open file handle
808 * @buf: User buffer to write data from
809 * @size: Number of bytes to write
810 * @pos: Offset to seek to
811 *
812 * The lower bits are the BYTE offset of the register to write. This
813 * allows writing multiple registers in a single call and having
814 * the returned size reflect that.
815 */
816static ssize_t amdgpu_debugfs_regs_smc_write(struct file *f, const char __user *buf,
817 size_t size, loff_t *pos)
818{
819 struct amdgpu_device *adev = file_inode(f)->i_private;
820 ssize_t result = 0;
821 int r;
822
823 if (!adev->smc_wreg)
824 return -EOPNOTSUPP;
825
826 if (size & 0x3 || *pos & 0x3)
827 return -EINVAL;
828
829 r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
830 if (r < 0) {
831 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
832 return r;
833 }
834
835 r = amdgpu_virt_enable_access_debugfs(adev);
836 if (r < 0) {
837 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
838 return r;
839 }
840
841 while (size) {
842 uint32_t value;
843
844 r = get_user(value, (uint32_t *)buf);
845 if (r)
846 goto out;
847
848 WREG32_SMC(*pos, value);
849
850 result += 4;
851 buf += 4;
852 *pos += 4;
853 size -= 4;
854 }
855
856 r = result;
857out:
858 pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
859 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
860 amdgpu_virt_disable_access_debugfs(adev);
861 return r;
862}
863
864/**
865 * amdgpu_debugfs_gca_config_read - Read from gfx config data
866 *
867 * @f: open file handle
868 * @buf: User buffer to store read data in
869 * @size: Number of bytes to read
870 * @pos: Offset to seek to
871 *
872 * This file is used to access configuration data in a somewhat
873 * stable fashion. The format is a series of DWORDs with the first
874 * indicating which revision it is. New content is appended to the
875 * end so that older software can still read the data.
876 */
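/*
 * Illustrative userspace sketch (debugfs path and buffer size are
 * assumptions) of consuming the versioned DWORD stream described above:
 *
 *	uint32_t cfg[64];
 *	int fd = open("/sys/kernel/debug/dri/0/amdgpu_gca_config", O_RDONLY);
 *	ssize_t n = pread(fd, cfg, sizeof(cfg), 0);
 *	// cfg[0] is the format revision; later fields are only meaningful
 *	// when the revision is new enough and n is large enough to cover them.
 */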
877
878static ssize_t amdgpu_debugfs_gca_config_read(struct file *f, char __user *buf,
879 size_t size, loff_t *pos)
880{
881 struct amdgpu_device *adev = file_inode(f)->i_private;
882 ssize_t result = 0;
883 int r;
884 uint32_t *config, no_regs = 0;
885
886 if (size & 0x3 || *pos & 0x3)
887 return -EINVAL;
888
889 config = kmalloc_array(256, sizeof(*config), GFP_KERNEL);
890 if (!config)
891 return -ENOMEM;
892
893 /* version, increment each time something is added */
894 config[no_regs++] = 5;
895 config[no_regs++] = adev->gfx.config.max_shader_engines;
896 config[no_regs++] = adev->gfx.config.max_tile_pipes;
897 config[no_regs++] = adev->gfx.config.max_cu_per_sh;
898 config[no_regs++] = adev->gfx.config.max_sh_per_se;
899 config[no_regs++] = adev->gfx.config.max_backends_per_se;
900 config[no_regs++] = adev->gfx.config.max_texture_channel_caches;
901 config[no_regs++] = adev->gfx.config.max_gprs;
902 config[no_regs++] = adev->gfx.config.max_gs_threads;
903 config[no_regs++] = adev->gfx.config.max_hw_contexts;
904 config[no_regs++] = adev->gfx.config.sc_prim_fifo_size_frontend;
905 config[no_regs++] = adev->gfx.config.sc_prim_fifo_size_backend;
906 config[no_regs++] = adev->gfx.config.sc_hiz_tile_fifo_size;
907 config[no_regs++] = adev->gfx.config.sc_earlyz_tile_fifo_size;
908 config[no_regs++] = adev->gfx.config.num_tile_pipes;
909 config[no_regs++] = adev->gfx.config.backend_enable_mask;
910 config[no_regs++] = adev->gfx.config.mem_max_burst_length_bytes;
911 config[no_regs++] = adev->gfx.config.mem_row_size_in_kb;
912 config[no_regs++] = adev->gfx.config.shader_engine_tile_size;
913 config[no_regs++] = adev->gfx.config.num_gpus;
914 config[no_regs++] = adev->gfx.config.multi_gpu_tile_size;
915 config[no_regs++] = adev->gfx.config.mc_arb_ramcfg;
916 config[no_regs++] = adev->gfx.config.gb_addr_config;
917 config[no_regs++] = adev->gfx.config.num_rbs;
918
919 /* rev==1 */
920 config[no_regs++] = adev->rev_id;
921 config[no_regs++] = lower_32_bits(adev->pg_flags);
922 config[no_regs++] = lower_32_bits(adev->cg_flags);
923
924 /* rev==2 */
925 config[no_regs++] = adev->family;
926 config[no_regs++] = adev->external_rev_id;
927
928 /* rev==3 */
929 config[no_regs++] = adev->pdev->device;
930 config[no_regs++] = adev->pdev->revision;
931 config[no_regs++] = adev->pdev->subsystem_device;
932 config[no_regs++] = adev->pdev->subsystem_vendor;
933
934 /* rev==4 APU flag */
935 config[no_regs++] = adev->flags & AMD_IS_APU ? 1 : 0;
936
937 /* rev==5 PG/CG flag upper 32bit */
938 config[no_regs++] = upper_32_bits(adev->pg_flags);
939 config[no_regs++] = upper_32_bits(adev->cg_flags);
940
941 while (size && (*pos < no_regs * 4)) {
942 uint32_t value;
943
944 value = config[*pos >> 2];
945 r = put_user(value, (uint32_t *)buf);
946 if (r) {
947 kfree(config);
948 return r;
949 }
950
951 result += 4;
952 buf += 4;
953 *pos += 4;
954 size -= 4;
955 }
956
957 kfree(config);
958 return result;
959}
960
961/**
962 * amdgpu_debugfs_sensor_read - Read from the powerplay sensors
963 *
964 * @f: open file handle
965 * @buf: User buffer to store read data in
966 * @size: Number of bytes to read
967 * @pos: Offset to seek to
968 *
969 * The offset is treated as the BYTE address of one of the sensors
970 * enumerated in amd/include/kgd_pp_interface.h under the
971 * 'amd_pp_sensors' enumeration. For instance to read the UVD VCLK
972 * 'amd_pp_sensors' enumeration. For instance, to read the UVD VCLK
973 */
974static ssize_t amdgpu_debugfs_sensor_read(struct file *f, char __user *buf,
975 size_t size, loff_t *pos)
976{
977 struct amdgpu_device *adev = file_inode(f)->i_private;
978 int idx, x, outsize, r, valuesize;
979 uint32_t values[16];
980
981 if (size & 3 || *pos & 0x3)
982 return -EINVAL;
983
984 if (!adev->pm.dpm_enabled)
985 return -EINVAL;
986
987 /* convert offset to sensor number */
988 idx = *pos >> 2;
989
990 valuesize = sizeof(values);
991
992 r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
993 if (r < 0) {
994 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
995 return r;
996 }
997
998 r = amdgpu_virt_enable_access_debugfs(adev);
999 if (r < 0) {
1000 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
1001 return r;
1002 }
1003
1004 r = amdgpu_dpm_read_sensor(adev, idx, &values[0], &valuesize);
1005
1006 pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
1007 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
1008
1009 if (r) {
1010 amdgpu_virt_disable_access_debugfs(adev);
1011 return r;
1012 }
1013
1014 if (size > valuesize) {
1015 amdgpu_virt_disable_access_debugfs(adev);
1016 return -EINVAL;
1017 }
1018
1019 outsize = 0;
1020 x = 0;
1021 if (!r) {
1022 while (size) {
1023 r = put_user(values[x++], (int32_t *)buf);
1024 buf += 4;
1025 size -= 4;
1026 outsize += 4;
1027 }
1028 }
1029
1030 amdgpu_virt_disable_access_debugfs(adev);
1031 return !r ? outsize : r;
1032}
1033
1034/** amdgpu_debugfs_wave_read - Read WAVE STATUS data
1035 *
1036 * @f: open file handle
1037 * @buf: User buffer to store read data in
1038 * @size: Number of bytes to read
1039 * @pos: Offset to seek to
1040 *
1041 * The offset being sought selects the wave for which the status data
1042 * will be returned. The bits are used as follows:
1043 *
1044 * Bits 0..6: Byte offset into data
1045 * Bits 7..14: SE selector
1046 * Bits 15..22: SH/SA selector
1047 * Bits 23..30: CU/{WGP+SIMD} selector
1048 * Bits 31..36: WAVE ID selector
1049 * Bits 37..44: SIMD ID selector
1050 *
1051 * The returned data begins with one DWORD of version information,
1052 * followed by WAVE STATUS registers relevant to the GFX IP version
1053 * being used. See gfx_v8_0_read_wave_data() for an example output.
1054 */
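/*
 * Illustrative userspace sketch (not part of this file) of building the
 * offset described above; se/sh/cu/wave/simd are placeholders chosen by
 * the caller and fd is assumed to be the open amdgpu_wave file:
 *
 *	uint64_t off = ((uint64_t)se   << 7)  |
 *		       ((uint64_t)sh   << 15) |
 *		       ((uint64_t)cu   << 23) |
 *		       ((uint64_t)wave << 31) |
 *		       ((uint64_t)simd << 37);	// bits 0..6: byte offset into data
 *	pread(fd, data, sizeof(data), off);	// data[0] is the version DWORD
 */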
1055static ssize_t amdgpu_debugfs_wave_read(struct file *f, char __user *buf,
1056 size_t size, loff_t *pos)
1057{
1058 struct amdgpu_device *adev = f->f_inode->i_private;
1059 int r, x;
1060 ssize_t result = 0;
1061 uint32_t offset, se, sh, cu, wave, simd, data[32];
1062
1063 if (size & 3 || *pos & 3)
1064 return -EINVAL;
1065
1066 /* decode offset */
1067 offset = (*pos & GENMASK_ULL(6, 0));
1068 se = (*pos & GENMASK_ULL(14, 7)) >> 7;
1069 sh = (*pos & GENMASK_ULL(22, 15)) >> 15;
1070 cu = (*pos & GENMASK_ULL(30, 23)) >> 23;
1071 wave = (*pos & GENMASK_ULL(36, 31)) >> 31;
1072 simd = (*pos & GENMASK_ULL(44, 37)) >> 37;
1073
1074 r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
1075 if (r < 0) {
1076 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
1077 return r;
1078 }
1079
1080 r = amdgpu_virt_enable_access_debugfs(adev);
1081 if (r < 0) {
1082 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
1083 return r;
1084 }
1085
1086 /* switch to the specific se/sh/cu */
1087 mutex_lock(&adev->grbm_idx_mutex);
1088 amdgpu_gfx_select_se_sh(adev, se, sh, cu, 0);
1089
1090 x = 0;
1091 if (adev->gfx.funcs->read_wave_data)
1092 adev->gfx.funcs->read_wave_data(adev, 0, simd, wave, data, &x);
1093
1094 amdgpu_gfx_select_se_sh(adev, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0);
1095 mutex_unlock(&adev->grbm_idx_mutex);
1096
1097 pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
1098 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
1099
1100 if (!x) {
1101 amdgpu_virt_disable_access_debugfs(adev);
1102 return -EINVAL;
1103 }
1104
1105 while (size && (offset < x * 4)) {
1106 uint32_t value;
1107
1108 value = data[offset >> 2];
1109 r = put_user(value, (uint32_t *)buf);
1110 if (r) {
1111 amdgpu_virt_disable_access_debugfs(adev);
1112 return r;
1113 }
1114
1115 result += 4;
1116 buf += 4;
1117 offset += 4;
1118 size -= 4;
1119 }
1120
1121 amdgpu_virt_disable_access_debugfs(adev);
1122 return result;
1123}
1124
1125/** amdgpu_debugfs_gpr_read - Read wave gprs
1126 *
1127 * @f: open file handle
1128 * @buf: User buffer to store read data in
1129 * @size: Number of bytes to read
1130 * @pos: Offset to seek to
1131 *
1132 * The offset being sought selects the wave for which the data
1133 * will be returned. The bits are used as follows:
1134 *
1135 * Bits 0..11: Byte offset into data
1136 * Bits 12..19: SE selector
1137 * Bits 20..27: SH/SA selector
1138 * Bits 28..35: CU/{WGP+SIMD} selector
1139 * Bits 36..43: WAVE ID selector
1140 * Bits 44..51: SIMD ID selector
1141 * Bits 52..59: Thread selector
1142 * Bits 60..61: Bank selector (VGPR=0,SGPR=1)
1143 *
1144 * The return data comes from the SGPR or VGPR register bank for
1145 * the selected operational unit.
1146 */
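/*
 * Illustrative userspace sketch (not part of this file) of the offset
 * encoding above, selecting the SGPR bank of one wave; the selector values,
 * fd and count are assumptions for the example:
 *
 *	uint64_t off = ((uint64_t)se   << 12) |
 *		       ((uint64_t)sh   << 20) |
 *		       ((uint64_t)cu   << 28) |
 *		       ((uint64_t)wave << 36) |
 *		       ((uint64_t)simd << 44) |
 *		       (1ULL << 60);		// bank: 0 = VGPR, 1 = SGPR
 *	pread(fd, data, count, off);		// bits 0..11 carry the byte offset into data
 */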
1147static ssize_t amdgpu_debugfs_gpr_read(struct file *f, char __user *buf,
1148 size_t size, loff_t *pos)
1149{
1150 struct amdgpu_device *adev = f->f_inode->i_private;
1151 int r;
1152 ssize_t result = 0;
1153 uint32_t offset, se, sh, cu, wave, simd, thread, bank, *data;
1154
1155 if (size > 4096 || size & 3 || *pos & 3)
1156 return -EINVAL;
1157
1158 /* decode offset */
1159 offset = (*pos & GENMASK_ULL(11, 0)) >> 2;
1160 se = (*pos & GENMASK_ULL(19, 12)) >> 12;
1161 sh = (*pos & GENMASK_ULL(27, 20)) >> 20;
1162 cu = (*pos & GENMASK_ULL(35, 28)) >> 28;
1163 wave = (*pos & GENMASK_ULL(43, 36)) >> 36;
1164 simd = (*pos & GENMASK_ULL(51, 44)) >> 44;
1165 thread = (*pos & GENMASK_ULL(59, 52)) >> 52;
1166 bank = (*pos & GENMASK_ULL(61, 60)) >> 60;
1167
1168 data = kcalloc(1024, sizeof(*data), GFP_KERNEL);
1169 if (!data)
1170 return -ENOMEM;
1171
1172 r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
1173 if (r < 0)
1174 goto err;
1175
1176 r = amdgpu_virt_enable_access_debugfs(adev);
1177 if (r < 0)
1178 goto err;
1179
1180 /* switch to the specific se/sh/cu */
1181 mutex_lock(&adev->grbm_idx_mutex);
1182 amdgpu_gfx_select_se_sh(adev, se, sh, cu, 0);
1183
1184 if (bank == 0) {
1185 if (adev->gfx.funcs->read_wave_vgprs)
1186 adev->gfx.funcs->read_wave_vgprs(adev, 0, simd, wave, thread, offset, size>>2, data);
1187 } else {
1188 if (adev->gfx.funcs->read_wave_sgprs)
1189 adev->gfx.funcs->read_wave_sgprs(adev, 0, simd, wave, offset, size>>2, data);
1190 }
1191
1192 amdgpu_gfx_select_se_sh(adev, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0);
1193 mutex_unlock(&adev->grbm_idx_mutex);
1194
1195 pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
1196 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
1197
1198 while (size) {
1199 uint32_t value;
1200
1201 value = data[result >> 2];
1202 r = put_user(value, (uint32_t *)buf);
1203 if (r) {
1204 amdgpu_virt_disable_access_debugfs(adev);
1205 goto err;
1206 }
1207
1208 result += 4;
1209 buf += 4;
1210 size -= 4;
1211 }
1212
1213 kfree(data);
1214 amdgpu_virt_disable_access_debugfs(adev);
1215 return result;
1216
1217err:
1218 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
1219 kfree(data);
1220 return r;
1221}
1222
1223/**
1224 * amdgpu_debugfs_gfxoff_residency_read - Read GFXOFF residency
1225 *
1226 * @f: open file handle
1227 * @buf: User buffer to store read data in
1228 * @size: Number of bytes to read
1229 * @pos: Offset to seek to
1230 *
1231 * Read the last residency value logged. It doesn't auto-update; one needs to
1232 * stop logging before reading the current value.
1233 */
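/*
 * Hedged usage sketch (userspace; fd is the open amdgpu_gfxoff_residency
 * file, an assumption of the example): write a non-zero 32-bit word to
 * start logging, a zero word to stop, then read the value back:
 *
 *	uint32_t one = 1, zero = 0, res;
 *	pwrite(fd, &one, 4, 0);		// start logging
 *	// ... run the workload of interest ...
 *	pwrite(fd, &zero, 4, 0);	// stop logging
 *	pread(fd, &res, 4, 0);		// last logged residency value
 */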
1234static ssize_t amdgpu_debugfs_gfxoff_residency_read(struct file *f, char __user *buf,
1235 size_t size, loff_t *pos)
1236{
1237 struct amdgpu_device *adev = file_inode(f)->i_private;
1238 ssize_t result = 0;
1239 int r;
1240
1241 if (size & 0x3 || *pos & 0x3)
1242 return -EINVAL;
1243
1244 r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
1245 if (r < 0) {
1246 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
1247 return r;
1248 }
1249
1250 while (size) {
1251 uint32_t value;
1252
1253 r = amdgpu_get_gfx_off_residency(adev, &value);
1254 if (r)
1255 goto out;
1256
1257 r = put_user(value, (uint32_t *)buf);
1258 if (r)
1259 goto out;
1260
1261 result += 4;
1262 buf += 4;
1263 *pos += 4;
1264 size -= 4;
1265 }
1266
1267 r = result;
1268out:
1269 pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
1270 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
1271
1272 return r;
1273}
1274
1275/**
1276 * amdgpu_debugfs_gfxoff_residency_write - Log GFXOFF Residency
1277 *
1278 * @f: open file handle
1279 * @buf: User buffer to write data from
1280 * @size: Number of bytes to write
1281 * @pos: Offset to seek to
1282 *
1283 * Write a 32-bit non-zero to start logging; write a 32-bit zero to stop
1284 */
1285static ssize_t amdgpu_debugfs_gfxoff_residency_write(struct file *f, const char __user *buf,
1286 size_t size, loff_t *pos)
1287{
1288 struct amdgpu_device *adev = file_inode(f)->i_private;
1289 ssize_t result = 0;
1290 int r;
1291
1292 if (size & 0x3 || *pos & 0x3)
1293 return -EINVAL;
1294
1295 r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
1296 if (r < 0) {
1297 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
1298 return r;
1299 }
1300
1301 while (size) {
1302 u32 value;
1303
1304 r = get_user(value, (uint32_t *)buf);
1305 if (r)
1306 goto out;
1307
1308 amdgpu_set_gfx_off_residency(adev, value ? true : false);
1309
1310 result += 4;
1311 buf += 4;
1312 *pos += 4;
1313 size -= 4;
1314 }
1315
1316 r = result;
1317out:
1318 pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
1319 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
1320
1321 return r;
1322}
1323
1324
1325/**
1326 * amdgpu_debugfs_gfxoff_count_read - Read GFXOFF entry count
1327 *
1328 * @f: open file handle
1329 * @buf: User buffer to store read data in
1330 * @size: Number of bytes to read
1331 * @pos: Offset to seek to
1332 */
1333static ssize_t amdgpu_debugfs_gfxoff_count_read(struct file *f, char __user *buf,
1334 size_t size, loff_t *pos)
1335{
1336 struct amdgpu_device *adev = file_inode(f)->i_private;
1337 ssize_t result = 0;
1338 int r;
1339
1340 if (size & 0x3 || *pos & 0x3)
1341 return -EINVAL;
1342
1343 r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
1344 if (r < 0) {
1345 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
1346 return r;
1347 }
1348
1349 while (size) {
1350 u64 value = 0;
1351
1352 r = amdgpu_get_gfx_off_entrycount(adev, &value);
1353 if (r)
1354 goto out;
1355
1356 r = put_user(value, (u64 *)buf);
1357 if (r)
1358 goto out;
1359
1360 result += 4;
1361 buf += 4;
1362 *pos += 4;
1363 size -= 4;
1364 }
1365
1366 r = result;
1367out:
1368 pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
1369 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
1370
1371 return r;
1372}
1373
1374/**
1375 * amdgpu_debugfs_gfxoff_write - Enable/disable GFXOFF
1376 *
1377 * @f: open file handle
1378 * @buf: User buffer to write data from
1379 * @size: Number of bytes to write
1380 * @pos: Offset to seek to
1381 *
1382 * Write a 32-bit zero to disable or a 32-bit non-zero to enable
1383 */
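/*
 * Hedged usage sketch (userspace; fd is the open amdgpu_gfxoff file, an
 * assumption of the example): every 32-bit word written toggles GFXOFF
 * via amdgpu_gfx_off_ctrl(); zero disallows it, non-zero allows it again:
 *
 *	uint32_t zero = 0;
 *	pwrite(fd, &zero, 4, 0);	// keep GFX powered up (GFXOFF disallowed)
 */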
1384static ssize_t amdgpu_debugfs_gfxoff_write(struct file *f, const char __user *buf,
1385 size_t size, loff_t *pos)
1386{
1387 struct amdgpu_device *adev = file_inode(f)->i_private;
1388 ssize_t result = 0;
1389 int r;
1390
1391 if (size & 0x3 || *pos & 0x3)
1392 return -EINVAL;
1393
1394 r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
1395 if (r < 0) {
1396 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
1397 return r;
1398 }
1399
1400 while (size) {
1401 uint32_t value;
1402
1403 r = get_user(value, (uint32_t *)buf);
1404 if (r)
1405 goto out;
1406
1407 amdgpu_gfx_off_ctrl(adev, value ? true : false);
1408
1409 result += 4;
1410 buf += 4;
1411 *pos += 4;
1412 size -= 4;
1413 }
1414
1415 r = result;
1416out:
1417 pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
1418 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
1419
1420 return r;
1421}
1422
1423
1424/**
1425 * amdgpu_debugfs_gfxoff_read - read gfxoff status
1426 *
1427 * @f: open file handle
1428 * @buf: User buffer to store read data in
1429 * @size: Number of bytes to read
1430 * @pos: Offset to seek to
1431 */
1432static ssize_t amdgpu_debugfs_gfxoff_read(struct file *f, char __user *buf,
1433 size_t size, loff_t *pos)
1434{
1435 struct amdgpu_device *adev = file_inode(f)->i_private;
1436 ssize_t result = 0;
1437 int r;
1438
1439 if (size & 0x3 || *pos & 0x3)
1440 return -EINVAL;
1441
1442 r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
1443 if (r < 0) {
1444 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
1445 return r;
1446 }
1447
1448 while (size) {
1449 u32 value = adev->gfx.gfx_off_state;
1450
1451 r = put_user(value, (u32 *)buf);
1452 if (r)
1453 goto out;
1454
1455 result += 4;
1456 buf += 4;
1457 *pos += 4;
1458 size -= 4;
1459 }
1460
1461 r = result;
1462out:
1463 pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
1464 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
1465
1466 return r;
1467}
1468
1469static ssize_t amdgpu_debugfs_gfxoff_status_read(struct file *f, char __user *buf,
1470 size_t size, loff_t *pos)
1471{
1472 struct amdgpu_device *adev = file_inode(f)->i_private;
1473 ssize_t result = 0;
1474 int r;
1475
1476 if (size & 0x3 || *pos & 0x3)
1477 return -EINVAL;
1478
1479 r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
1480 if (r < 0) {
1481 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
1482 return r;
1483 }
1484
1485 while (size) {
1486 u32 value;
1487
1488 r = amdgpu_get_gfx_off_status(adev, &value);
1489 if (r)
1490 goto out;
1491
1492 r = put_user(value, (u32 *)buf);
1493 if (r)
1494 goto out;
1495
1496 result += 4;
1497 buf += 4;
1498 *pos += 4;
1499 size -= 4;
1500 }
1501
1502 r = result;
1503out:
1504 pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
1505 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
1506
1507 return r;
1508}
1509
1510static const struct file_operations amdgpu_debugfs_regs2_fops = {
1511 .owner = THIS_MODULE,
1512 .unlocked_ioctl = amdgpu_debugfs_regs2_ioctl,
1513 .read = amdgpu_debugfs_regs2_read,
1514 .write = amdgpu_debugfs_regs2_write,
1515 .open = amdgpu_debugfs_regs2_open,
1516 .release = amdgpu_debugfs_regs2_release,
1517 .llseek = default_llseek
1518};
1519
1520static const struct file_operations amdgpu_debugfs_gprwave_fops = {
1521 .owner = THIS_MODULE,
1522 .unlocked_ioctl = amdgpu_debugfs_gprwave_ioctl,
1523 .read = amdgpu_debugfs_gprwave_read,
1524 .open = amdgpu_debugfs_gprwave_open,
1525 .release = amdgpu_debugfs_gprwave_release,
1526 .llseek = default_llseek
1527};
1528
1529static const struct file_operations amdgpu_debugfs_regs_fops = {
1530 .owner = THIS_MODULE,
1531 .read = amdgpu_debugfs_regs_read,
1532 .write = amdgpu_debugfs_regs_write,
1533 .llseek = default_llseek
1534};
1535static const struct file_operations amdgpu_debugfs_regs_didt_fops = {
1536 .owner = THIS_MODULE,
1537 .read = amdgpu_debugfs_regs_didt_read,
1538 .write = amdgpu_debugfs_regs_didt_write,
1539 .llseek = default_llseek
1540};
1541static const struct file_operations amdgpu_debugfs_regs_pcie_fops = {
1542 .owner = THIS_MODULE,
1543 .read = amdgpu_debugfs_regs_pcie_read,
1544 .write = amdgpu_debugfs_regs_pcie_write,
1545 .llseek = default_llseek
1546};
1547static const struct file_operations amdgpu_debugfs_regs_smc_fops = {
1548 .owner = THIS_MODULE,
1549 .read = amdgpu_debugfs_regs_smc_read,
1550 .write = amdgpu_debugfs_regs_smc_write,
1551 .llseek = default_llseek
1552};
1553
1554static const struct file_operations amdgpu_debugfs_gca_config_fops = {
1555 .owner = THIS_MODULE,
1556 .read = amdgpu_debugfs_gca_config_read,
1557 .llseek = default_llseek
1558};
1559
1560static const struct file_operations amdgpu_debugfs_sensors_fops = {
1561 .owner = THIS_MODULE,
1562 .read = amdgpu_debugfs_sensor_read,
1563 .llseek = default_llseek
1564};
1565
1566static const struct file_operations amdgpu_debugfs_wave_fops = {
1567 .owner = THIS_MODULE,
1568 .read = amdgpu_debugfs_wave_read,
1569 .llseek = default_llseek
1570};
1571static const struct file_operations amdgpu_debugfs_gpr_fops = {
1572 .owner = THIS_MODULE,
1573 .read = amdgpu_debugfs_gpr_read,
1574 .llseek = default_llseek
1575};
1576
1577static const struct file_operations amdgpu_debugfs_gfxoff_fops = {
1578 .owner = THIS_MODULE,
1579 .read = amdgpu_debugfs_gfxoff_read,
1580 .write = amdgpu_debugfs_gfxoff_write,
1581 .llseek = default_llseek
1582};
1583
1584static const struct file_operations amdgpu_debugfs_gfxoff_status_fops = {
1585 .owner = THIS_MODULE,
1586 .read = amdgpu_debugfs_gfxoff_status_read,
1587 .llseek = default_llseek
1588};
1589
1590static const struct file_operations amdgpu_debugfs_gfxoff_count_fops = {
1591 .owner = THIS_MODULE,
1592 .read = amdgpu_debugfs_gfxoff_count_read,
1593 .llseek = default_llseek
1594};
1595
1596static const struct file_operations amdgpu_debugfs_gfxoff_residency_fops = {
1597 .owner = THIS_MODULE,
1598 .read = amdgpu_debugfs_gfxoff_residency_read,
1599 .write = amdgpu_debugfs_gfxoff_residency_write,
1600 .llseek = default_llseek
1601};
1602
1603static const struct file_operations *debugfs_regs[] = {
1604 &amdgpu_debugfs_regs_fops,
1605 &amdgpu_debugfs_regs2_fops,
1606 &amdgpu_debugfs_gprwave_fops,
1607 &amdgpu_debugfs_regs_didt_fops,
1608 &amdgpu_debugfs_regs_pcie_fops,
1609 &amdgpu_debugfs_regs_smc_fops,
1610 &amdgpu_debugfs_gca_config_fops,
1611 &amdgpu_debugfs_sensors_fops,
1612 &amdgpu_debugfs_wave_fops,
1613 &amdgpu_debugfs_gpr_fops,
1614 &amdgpu_debugfs_gfxoff_fops,
1615 &amdgpu_debugfs_gfxoff_status_fops,
1616 &amdgpu_debugfs_gfxoff_count_fops,
1617 &amdgpu_debugfs_gfxoff_residency_fops,
1618};
1619
1620static const char * const debugfs_regs_names[] = {
1621 "amdgpu_regs",
1622 "amdgpu_regs2",
1623 "amdgpu_gprwave",
1624 "amdgpu_regs_didt",
1625 "amdgpu_regs_pcie",
1626 "amdgpu_regs_smc",
1627 "amdgpu_gca_config",
1628 "amdgpu_sensors",
1629 "amdgpu_wave",
1630 "amdgpu_gpr",
1631 "amdgpu_gfxoff",
1632 "amdgpu_gfxoff_status",
1633 "amdgpu_gfxoff_count",
1634 "amdgpu_gfxoff_residency",
1635};
1636
1637/**
1638 * amdgpu_debugfs_regs_init - Initialize debugfs entries that provide
1639 * register access.
1640 *
1641 * @adev: The device to attach the debugfs entries to
1642 */
1643int amdgpu_debugfs_regs_init(struct amdgpu_device *adev)
1644{
1645 struct drm_minor *minor = adev_to_drm(adev)->primary;
1646 struct dentry *ent, *root = minor->debugfs_root;
1647 unsigned int i;
1648
1649 for (i = 0; i < ARRAY_SIZE(debugfs_regs); i++) {
1650 ent = debugfs_create_file(debugfs_regs_names[i],
1651 S_IFREG | 0444, root,
1652 adev, debugfs_regs[i]);
1653 if (!i && !IS_ERR_OR_NULL(ent))
1654 i_size_write(ent->d_inode, adev->rmmio_size);
1655 }
1656
1657 return 0;
1658}
1659
1660static int amdgpu_debugfs_test_ib_show(struct seq_file *m, void *unused)
1661{
1662 struct amdgpu_device *adev = m->private;
1663 struct drm_device *dev = adev_to_drm(adev);
1664 int r = 0, i;
1665
1666 r = pm_runtime_get_sync(dev->dev);
1667 if (r < 0) {
1668 pm_runtime_put_autosuspend(dev->dev);
1669 return r;
1670 }
1671
1672	/* Avoid accidentally unparking the sched thread during GPU reset */
1673 r = down_write_killable(&adev->reset_domain->sem);
1674 if (r)
1675 return r;
1676
1677 /* hold on the scheduler */
1678 for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
1679 struct amdgpu_ring *ring = adev->rings[i];
1680
1681 if (!amdgpu_ring_sched_ready(ring))
1682 continue;
1683 drm_sched_wqueue_stop(&ring->sched);
1684 }
1685
1686 seq_puts(m, "run ib test:\n");
1687 r = amdgpu_ib_ring_tests(adev);
1688 if (r)
1689 seq_printf(m, "ib ring tests failed (%d).\n", r);
1690 else
1691 seq_puts(m, "ib ring tests passed.\n");
1692
1693 /* go on the scheduler */
1694 for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
1695 struct amdgpu_ring *ring = adev->rings[i];
1696
1697 if (!amdgpu_ring_sched_ready(ring))
1698 continue;
1699 drm_sched_wqueue_start(&ring->sched);
1700 }
1701
1702 up_write(&adev->reset_domain->sem);
1703
1704 pm_runtime_mark_last_busy(dev->dev);
1705 pm_runtime_put_autosuspend(dev->dev);
1706
1707 return 0;
1708}
1709
1710static int amdgpu_debugfs_evict_vram(void *data, u64 *val)
1711{
1712 struct amdgpu_device *adev = (struct amdgpu_device *)data;
1713 struct drm_device *dev = adev_to_drm(adev);
1714 int r;
1715
1716 r = pm_runtime_get_sync(dev->dev);
1717 if (r < 0) {
1718 pm_runtime_put_autosuspend(dev->dev);
1719 return r;
1720 }
1721
1722 *val = amdgpu_ttm_evict_resources(adev, TTM_PL_VRAM);
1723
1724 pm_runtime_mark_last_busy(dev->dev);
1725 pm_runtime_put_autosuspend(dev->dev);
1726
1727 return 0;
1728}
1729
1730
1731static int amdgpu_debugfs_evict_gtt(void *data, u64 *val)
1732{
1733 struct amdgpu_device *adev = (struct amdgpu_device *)data;
1734 struct drm_device *dev = adev_to_drm(adev);
1735 int r;
1736
1737 r = pm_runtime_get_sync(dev->dev);
1738 if (r < 0) {
1739 pm_runtime_put_autosuspend(dev->dev);
1740 return r;
1741 }
1742
1743 *val = amdgpu_ttm_evict_resources(adev, TTM_PL_TT);
1744
1745 pm_runtime_mark_last_busy(dev->dev);
1746 pm_runtime_put_autosuspend(dev->dev);
1747
1748 return 0;
1749}
1750
1751static int amdgpu_debugfs_benchmark(void *data, u64 val)
1752{
1753 struct amdgpu_device *adev = (struct amdgpu_device *)data;
1754 struct drm_device *dev = adev_to_drm(adev);
1755 int r;
1756
1757 r = pm_runtime_get_sync(dev->dev);
1758 if (r < 0) {
1759 pm_runtime_put_autosuspend(dev->dev);
1760 return r;
1761 }
1762
1763 r = amdgpu_benchmark(adev, val);
1764
1765 pm_runtime_mark_last_busy(dev->dev);
1766 pm_runtime_put_autosuspend(dev->dev);
1767
1768 return r;
1769}
1770
1771static int amdgpu_debugfs_vm_info_show(struct seq_file *m, void *unused)
1772{
1773 struct amdgpu_device *adev = m->private;
1774 struct drm_device *dev = adev_to_drm(adev);
1775 struct drm_file *file;
1776 int r;
1777
1778 r = mutex_lock_interruptible(&dev->filelist_mutex);
1779 if (r)
1780 return r;
1781
1782 list_for_each_entry(file, &dev->filelist, lhead) {
1783 struct amdgpu_fpriv *fpriv = file->driver_priv;
1784 struct amdgpu_vm *vm = &fpriv->vm;
1785 struct amdgpu_task_info *ti;
1786
1787 ti = amdgpu_vm_get_task_info_vm(vm);
1788 if (ti) {
1789 seq_printf(m, "pid:%d\tProcess:%s ----------\n", ti->pid, ti->process_name);
1790 amdgpu_vm_put_task_info(ti);
1791 }
1792
1793 r = amdgpu_bo_reserve(vm->root.bo, true);
1794 if (r)
1795 break;
1796 amdgpu_debugfs_vm_bo_info(vm, m);
1797 amdgpu_bo_unreserve(vm->root.bo);
1798 }
1799
1800 mutex_unlock(&dev->filelist_mutex);
1801
1802 return r;
1803}
1804
1805DEFINE_SHOW_ATTRIBUTE(amdgpu_debugfs_test_ib);
1806DEFINE_SHOW_ATTRIBUTE(amdgpu_debugfs_vm_info);
1807DEFINE_DEBUGFS_ATTRIBUTE(amdgpu_evict_vram_fops, amdgpu_debugfs_evict_vram,
1808 NULL, "%lld\n");
1809DEFINE_DEBUGFS_ATTRIBUTE(amdgpu_evict_gtt_fops, amdgpu_debugfs_evict_gtt,
1810 NULL, "%lld\n");
1811DEFINE_DEBUGFS_ATTRIBUTE(amdgpu_benchmark_fops, NULL, amdgpu_debugfs_benchmark,
1812 "%lld\n");
1813
1814static void amdgpu_ib_preempt_fences_swap(struct amdgpu_ring *ring,
1815 struct dma_fence **fences)
1816{
1817 struct amdgpu_fence_driver *drv = &ring->fence_drv;
1818 uint32_t sync_seq, last_seq;
1819
1820 last_seq = atomic_read(&ring->fence_drv.last_seq);
1821 sync_seq = ring->fence_drv.sync_seq;
1822
1823 last_seq &= drv->num_fences_mask;
1824 sync_seq &= drv->num_fences_mask;
1825
1826 do {
1827 struct dma_fence *fence, **ptr;
1828
1829 ++last_seq;
1830 last_seq &= drv->num_fences_mask;
1831 ptr = &drv->fences[last_seq];
1832
1833 fence = rcu_dereference_protected(*ptr, 1);
1834 RCU_INIT_POINTER(*ptr, NULL);
1835
1836 if (!fence)
1837 continue;
1838
1839 fences[last_seq] = fence;
1840
1841 } while (last_seq != sync_seq);
1842}
1843
1844static void amdgpu_ib_preempt_signal_fences(struct dma_fence **fences,
1845 int length)
1846{
1847 int i;
1848 struct dma_fence *fence;
1849
1850 for (i = 0; i < length; i++) {
1851 fence = fences[i];
1852 if (!fence)
1853 continue;
1854 dma_fence_signal(fence);
1855 dma_fence_put(fence);
1856 }
1857}
1858
1859static void amdgpu_ib_preempt_job_recovery(struct drm_gpu_scheduler *sched)
1860{
1861 struct drm_sched_job *s_job;
1862 struct dma_fence *fence;
1863
1864 spin_lock(&sched->job_list_lock);
1865 list_for_each_entry(s_job, &sched->pending_list, list) {
1866 fence = sched->ops->run_job(s_job);
1867 dma_fence_put(fence);
1868 }
1869 spin_unlock(&sched->job_list_lock);
1870}
1871
1872static void amdgpu_ib_preempt_mark_partial_job(struct amdgpu_ring *ring)
1873{
1874 struct amdgpu_job *job;
1875 struct drm_sched_job *s_job, *tmp;
1876 uint32_t preempt_seq;
1877 struct dma_fence *fence, **ptr;
1878 struct amdgpu_fence_driver *drv = &ring->fence_drv;
1879 struct drm_gpu_scheduler *sched = &ring->sched;
1880 bool preempted = true;
1881
1882 if (ring->funcs->type != AMDGPU_RING_TYPE_GFX)
1883 return;
1884
1885 preempt_seq = le32_to_cpu(*(drv->cpu_addr + 2));
1886 if (preempt_seq <= atomic_read(&drv->last_seq)) {
1887 preempted = false;
1888 goto no_preempt;
1889 }
1890
1891 preempt_seq &= drv->num_fences_mask;
1892 ptr = &drv->fences[preempt_seq];
1893 fence = rcu_dereference_protected(*ptr, 1);
1894
1895no_preempt:
1896 spin_lock(&sched->job_list_lock);
1897 list_for_each_entry_safe(s_job, tmp, &sched->pending_list, list) {
1898 if (dma_fence_is_signaled(&s_job->s_fence->finished)) {
1899			/* remove the job from the scheduler's pending_list */
1900 list_del_init(&s_job->list);
1901 sched->ops->free_job(s_job);
1902 continue;
1903 }
1904 job = to_amdgpu_job(s_job);
1905 if (preempted && (&job->hw_fence) == fence)
1906 /* mark the job as preempted */
1907 job->preemption_status |= AMDGPU_IB_PREEMPTED;
1908 }
1909 spin_unlock(&sched->job_list_lock);
1910}
1911
1912static int amdgpu_debugfs_ib_preempt(void *data, u64 val)
1913{
1914 int r, length;
1915 struct amdgpu_ring *ring;
1916 struct dma_fence **fences = NULL;
1917 struct amdgpu_device *adev = (struct amdgpu_device *)data;
1918
1919 if (val >= AMDGPU_MAX_RINGS)
1920 return -EINVAL;
1921
1922 ring = adev->rings[val];
1923
1924 if (!amdgpu_ring_sched_ready(ring) ||
1925 !ring->funcs->preempt_ib)
1926 return -EINVAL;
1927
1928 /* the last preemption failed */
1929 if (ring->trail_seq != le32_to_cpu(*ring->trail_fence_cpu_addr))
1930 return -EBUSY;
1931
1932 length = ring->fence_drv.num_fences_mask + 1;
1933 fences = kcalloc(length, sizeof(void *), GFP_KERNEL);
1934 if (!fences)
1935 return -ENOMEM;
1936
1937	/* Avoid accidentally unparking the sched thread during GPU reset */
1938 r = down_read_killable(&adev->reset_domain->sem);
1939 if (r)
1940 goto pro_end;
1941
1942 /* stop the scheduler */
1943 drm_sched_wqueue_stop(&ring->sched);
1944
1945 /* preempt the IB */
1946 r = amdgpu_ring_preempt_ib(ring);
1947 if (r) {
1948 DRM_WARN("failed to preempt ring %d\n", ring->idx);
1949 goto failure;
1950 }
1951
1952 amdgpu_fence_process(ring);
1953
1954 if (atomic_read(&ring->fence_drv.last_seq) !=
1955 ring->fence_drv.sync_seq) {
1956 DRM_INFO("ring %d was preempted\n", ring->idx);
1957
1958 amdgpu_ib_preempt_mark_partial_job(ring);
1959
1960 /* swap out the old fences */
1961 amdgpu_ib_preempt_fences_swap(ring, fences);
1962
1963 amdgpu_fence_driver_force_completion(ring);
1964
1965 /* resubmit unfinished jobs */
1966 amdgpu_ib_preempt_job_recovery(&ring->sched);
1967
1968 /* wait for jobs finished */
1969 amdgpu_fence_wait_empty(ring);
1970
1971 /* signal the old fences */
1972 amdgpu_ib_preempt_signal_fences(fences, length);
1973 }
1974
1975failure:
1976 /* restart the scheduler */
1977 drm_sched_wqueue_start(&ring->sched);
1978
1979 up_read(&adev->reset_domain->sem);
1980
1981pro_end:
1982 kfree(fences);
1983
1984 return r;
1985}
1986
1987static int amdgpu_debugfs_sclk_set(void *data, u64 val)
1988{
1989 int ret = 0;
1990 uint32_t max_freq, min_freq;
1991 struct amdgpu_device *adev = (struct amdgpu_device *)data;
1992
1993 if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
1994 return -EINVAL;
1995
1996 ret = pm_runtime_get_sync(adev_to_drm(adev)->dev);
1997 if (ret < 0) {
1998 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
1999 return ret;
2000 }
2001
2002 ret = amdgpu_dpm_get_dpm_freq_range(adev, PP_SCLK, &min_freq, &max_freq);
2003 if (ret == -EOPNOTSUPP) {
2004 ret = 0;
2005 goto out;
2006 }
2007 if (ret || val > max_freq || val < min_freq) {
2008 ret = -EINVAL;
2009 goto out;
2010 }
2011
2012 ret = amdgpu_dpm_set_soft_freq_range(adev, PP_SCLK, (uint32_t)val, (uint32_t)val);
2013 if (ret)
2014 ret = -EINVAL;
2015
2016out:
2017 pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
2018 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
2019
2020 return ret;
2021}
2022
2023DEFINE_DEBUGFS_ATTRIBUTE(fops_ib_preempt, NULL,
2024 amdgpu_debugfs_ib_preempt, "%llu\n");
2025
2026DEFINE_DEBUGFS_ATTRIBUTE(fops_sclk_set, NULL,
2027 amdgpu_debugfs_sclk_set, "%llu\n");
2028
2029static ssize_t amdgpu_reset_dump_register_list_read(struct file *f,
2030 char __user *buf, size_t size, loff_t *pos)
2031{
2032 struct amdgpu_device *adev = (struct amdgpu_device *)file_inode(f)->i_private;
2033 char reg_offset[12];
2034 int i, ret, len = 0;
2035
2036 if (*pos)
2037 return 0;
2038
2039 memset(reg_offset, 0, 12);
2040 ret = down_read_killable(&adev->reset_domain->sem);
2041 if (ret)
2042 return ret;
2043
2044 for (i = 0; i < adev->reset_info.num_regs; i++) {
2045 sprintf(reg_offset, "0x%x\n", adev->reset_info.reset_dump_reg_list[i]);
2046 up_read(&adev->reset_domain->sem);
2047 if (copy_to_user(buf + len, reg_offset, strlen(reg_offset)))
2048 return -EFAULT;
2049
2050 len += strlen(reg_offset);
2051 ret = down_read_killable(&adev->reset_domain->sem);
2052 if (ret)
2053 return ret;
2054 }
2055
2056 up_read(&adev->reset_domain->sem);
2057 *pos += len;
2058
2059 return len;
2060}
2061
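/*
 * The write side below accepts whitespace-separated hexadecimal register
 * offsets; a hedged usage sketch (debugfs path assumed):
 *
 *	echo "0x1b00 0x1b04" > /sys/kernel/debug/dri/0/amdgpu_reset_dump_register_list
 *
 * Reading the file back returns one "0x..." offset per line.
 */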
2062static ssize_t amdgpu_reset_dump_register_list_write(struct file *f,
2063 const char __user *buf, size_t size, loff_t *pos)
2064{
2065 struct amdgpu_device *adev = (struct amdgpu_device *)file_inode(f)->i_private;
2066 char reg_offset[11];
2067 uint32_t *new = NULL, *tmp = NULL;
2068 int ret, i = 0, len = 0;
2069
2070 do {
2071 memset(reg_offset, 0, 11);
2072 if (copy_from_user(reg_offset, buf + len,
2073 min(10, ((int)size-len)))) {
2074 ret = -EFAULT;
2075 goto error_free;
2076 }
2077
2078 new = krealloc_array(tmp, i + 1, sizeof(uint32_t), GFP_KERNEL);
2079 if (!new) {
2080 ret = -ENOMEM;
2081 goto error_free;
2082 }
2083 tmp = new;
2084 if (sscanf(reg_offset, "%X %n", &tmp[i], &ret) != 1) {
2085 ret = -EINVAL;
2086 goto error_free;
2087 }
2088
2089 len += ret;
2090 i++;
2091 } while (len < size);
2092
2093 new = kmalloc_array(i, sizeof(uint32_t), GFP_KERNEL);
2094 if (!new) {
2095 ret = -ENOMEM;
2096 goto error_free;
2097 }
2098 ret = down_write_killable(&adev->reset_domain->sem);
2099 if (ret)
2100 goto error_free;
2101
2102 swap(adev->reset_info.reset_dump_reg_list, tmp);
2103 swap(adev->reset_info.reset_dump_reg_value, new);
2104 adev->reset_info.num_regs = i;
2105 up_write(&adev->reset_domain->sem);
2106 ret = size;
2107
2108error_free:
2109 if (tmp != new)
2110 kfree(tmp);
2111 kfree(new);
2112 return ret;
2113}
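/*
 * Format note (illustrative, not taken from the kernel documentation): the
 * write handler above accepts a whitespace-separated list of hexadecimal
 * register offsets (parsed with sscanf("%X")), and the read handler prints
 * the current list back as one "0x%x" value per line. Assuming the usual
 * debugfs path, the list could be programmed with:
 *
 *   int fd = open("/sys/kernel/debug/dri/0/amdgpu_reset_dump_register_list",
 *                 O_WRONLY);
 *
 *   if (fd >= 0) {
 *           write(fd, "0x1000 0x1004 0x1008\n", 21);
 *           close(fd);
 *   }
 *
 * The three offsets are placeholders; meaningful offsets depend on the ASIC
 * and on which registers should be dumped on the next GPU reset.
 */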
2114
2115static const struct file_operations amdgpu_reset_dump_register_list = {
2116 .owner = THIS_MODULE,
2117 .read = amdgpu_reset_dump_register_list_read,
2118 .write = amdgpu_reset_dump_register_list_write,
2119 .llseek = default_llseek
2120};
2121
2122int amdgpu_debugfs_init(struct amdgpu_device *adev)
2123{
2124 struct dentry *root = adev_to_drm(adev)->primary->debugfs_root;
2125 struct dentry *ent;
2126 int r, i;
2127
2128 if (!debugfs_initialized())
2129 return 0;
2130
2131 debugfs_create_x32("amdgpu_smu_debug", 0600, root,
2132 &adev->pm.smu_debug_mask);
2133
2134 ent = debugfs_create_file("amdgpu_preempt_ib", 0600, root, adev,
2135 &fops_ib_preempt);
2136 if (IS_ERR(ent)) {
2137		DRM_ERROR("unable to create amdgpu_preempt_ib debugfs file\n");
2138 return PTR_ERR(ent);
2139 }
2140
2141 ent = debugfs_create_file("amdgpu_force_sclk", 0200, root, adev,
2142 &fops_sclk_set);
2143 if (IS_ERR(ent)) {
2144		DRM_ERROR("unable to create amdgpu_force_sclk debugfs file\n");
2145 return PTR_ERR(ent);
2146 }
2147
2148 /* Register debugfs entries for amdgpu_ttm */
2149 amdgpu_ttm_debugfs_init(adev);
2150 amdgpu_debugfs_pm_init(adev);
2151 amdgpu_debugfs_sa_init(adev);
2152 amdgpu_debugfs_fence_init(adev);
2153 amdgpu_debugfs_gem_init(adev);
2154
2155 r = amdgpu_debugfs_regs_init(adev);
2156 if (r)
2157 DRM_ERROR("registering register debugfs failed (%d).\n", r);
2158
2159 amdgpu_debugfs_firmware_init(adev);
2160 amdgpu_ta_if_debugfs_init(adev);
2161
2162 amdgpu_debugfs_mes_event_log_init(adev);
2163
2164#if defined(CONFIG_DRM_AMD_DC)
2165 if (adev->dc_enabled)
2166 dtn_debugfs_init(adev);
2167#endif
2168
2169 for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
2170 struct amdgpu_ring *ring = adev->rings[i];
2171
2172 if (!ring)
2173 continue;
2174
2175 amdgpu_debugfs_ring_init(adev, ring);
2176 }
2177
2178 for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
2179 if (!amdgpu_vcnfw_log)
2180 break;
2181
2182 if (adev->vcn.harvest_config & (1 << i))
2183 continue;
2184
2185 amdgpu_debugfs_vcn_fwlog_init(adev, i, &adev->vcn.inst[i]);
2186 }
2187
2188 amdgpu_ras_debugfs_create_all(adev);
2189 amdgpu_rap_debugfs_init(adev);
2190 amdgpu_securedisplay_debugfs_init(adev);
2191 amdgpu_fw_attestation_debugfs_init(adev);
2192
2193 debugfs_create_file("amdgpu_evict_vram", 0444, root, adev,
2194 &amdgpu_evict_vram_fops);
2195 debugfs_create_file("amdgpu_evict_gtt", 0444, root, adev,
2196 &amdgpu_evict_gtt_fops);
2197 debugfs_create_file("amdgpu_test_ib", 0444, root, adev,
2198 &amdgpu_debugfs_test_ib_fops);
2199 debugfs_create_file("amdgpu_vm_info", 0444, root, adev,
2200 &amdgpu_debugfs_vm_info_fops);
2201 debugfs_create_file("amdgpu_benchmark", 0200, root, adev,
2202 &amdgpu_benchmark_fops);
2203 debugfs_create_file("amdgpu_reset_dump_register_list", 0644, root, adev,
2204 &amdgpu_reset_dump_register_list);
2205
2206 adev->debugfs_vbios_blob.data = adev->bios;
2207 adev->debugfs_vbios_blob.size = adev->bios_size;
2208 debugfs_create_blob("amdgpu_vbios", 0444, root,
2209 &adev->debugfs_vbios_blob);
2210
2211 adev->debugfs_discovery_blob.data = adev->mman.discovery_bin;
2212 adev->debugfs_discovery_blob.size = adev->mman.discovery_tmr_size;
2213 debugfs_create_blob("amdgpu_discovery", 0444, root,
2214 &adev->debugfs_discovery_blob);
2215
2216 return 0;
2217}
2218
2219#else
2220int amdgpu_debugfs_init(struct amdgpu_device *adev)
2221{
2222 return 0;
2223}
2224int amdgpu_debugfs_regs_init(struct amdgpu_device *adev)
2225{
2226 return 0;
2227}
2228#endif
1/*
2 * Copyright 2008 Advanced Micro Devices, Inc.
3 * Copyright 2008 Red Hat Inc.
4 * Copyright 2009 Jerome Glisse.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the "Software"),
8 * to deal in the Software without restriction, including without limitation
9 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10 * and/or sell copies of the Software, and to permit persons to whom the
11 * Software is furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice shall be included in
14 * all copies or substantial portions of the Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
20 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
21 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
22 * OTHER DEALINGS IN THE SOFTWARE.
23 *
24 */
25
26#include <linux/kthread.h>
27#include <linux/pci.h>
28#include <linux/uaccess.h>
29#include <linux/pm_runtime.h>
30
31#include "amdgpu.h"
32#include "amdgpu_pm.h"
33#include "amdgpu_dm_debugfs.h"
34#include "amdgpu_ras.h"
35#include "amdgpu_rap.h"
36#include "amdgpu_securedisplay.h"
37#include "amdgpu_fw_attestation.h"
38#include "amdgpu_umr.h"
39
40#include "amdgpu_reset.h"
41#include "amdgpu_psp_ta.h"
42
43#if defined(CONFIG_DEBUG_FS)
44
45/**
46 * amdgpu_debugfs_process_reg_op - Handle MMIO register reads/writes
47 *
48 * @read: True if reading
49 * @f: open file handle
50 * @buf: User buffer to write/read to
51 * @size: Number of bytes to write/read
52 * @pos: Offset to seek to
53 *
54 * This debugfs entry has special meaning on the offset being sought.
55 * Various bits have different meanings:
56 *
57 * Bit 62: Indicates a GRBM bank switch is needed
58 * Bit 61: Indicates a SRBM bank switch is needed (implies bit 62 is
59 * zero)
60 * Bits 24..33: The SE or ME selector if needed
61 * Bits 34..43: The SH (or SA) or PIPE selector if needed
62 * Bits 44..53: The INSTANCE (or CU/WGP) or QUEUE selector if needed
63 *
64 * Bit 23: Indicates that the PM power gating lock should be held
65 * This is necessary to read registers that might be
66 * unreliable during a power gating transistion.
67 *
68 * The lower bits are the BYTE offset of the register to read. This
69 * allows reading multiple registers in a single call and having
70 * the returned size reflect that.
71 */
72static int amdgpu_debugfs_process_reg_op(bool read, struct file *f,
73 char __user *buf, size_t size, loff_t *pos)
74{
75 struct amdgpu_device *adev = file_inode(f)->i_private;
76 ssize_t result = 0;
77 int r;
78 bool pm_pg_lock, use_bank, use_ring;
79 unsigned instance_bank, sh_bank, se_bank, me, pipe, queue, vmid;
80
81 pm_pg_lock = use_bank = use_ring = false;
82 instance_bank = sh_bank = se_bank = me = pipe = queue = vmid = 0;
83
84 if (size & 0x3 || *pos & 0x3 ||
85 ((*pos & (1ULL << 62)) && (*pos & (1ULL << 61))))
86 return -EINVAL;
87
88 /* are we reading registers for which a PG lock is necessary? */
89 pm_pg_lock = (*pos >> 23) & 1;
90
91 if (*pos & (1ULL << 62)) {
92 se_bank = (*pos & GENMASK_ULL(33, 24)) >> 24;
93 sh_bank = (*pos & GENMASK_ULL(43, 34)) >> 34;
94 instance_bank = (*pos & GENMASK_ULL(53, 44)) >> 44;
95
96 if (se_bank == 0x3FF)
97 se_bank = 0xFFFFFFFF;
98 if (sh_bank == 0x3FF)
99 sh_bank = 0xFFFFFFFF;
100 if (instance_bank == 0x3FF)
101 instance_bank = 0xFFFFFFFF;
102 use_bank = true;
103 } else if (*pos & (1ULL << 61)) {
104
105 me = (*pos & GENMASK_ULL(33, 24)) >> 24;
106 pipe = (*pos & GENMASK_ULL(43, 34)) >> 34;
107 queue = (*pos & GENMASK_ULL(53, 44)) >> 44;
108 vmid = (*pos & GENMASK_ULL(58, 54)) >> 54;
109
110 use_ring = true;
111 } else {
112 use_bank = use_ring = false;
113 }
114
115 *pos &= (1UL << 22) - 1;
116
117 r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
118 if (r < 0) {
119 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
120 return r;
121 }
122
123 r = amdgpu_virt_enable_access_debugfs(adev);
124 if (r < 0) {
125 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
126 return r;
127 }
128
129 if (use_bank) {
130 if ((sh_bank != 0xFFFFFFFF && sh_bank >= adev->gfx.config.max_sh_per_se) ||
131 (se_bank != 0xFFFFFFFF && se_bank >= adev->gfx.config.max_shader_engines)) {
132 pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
133 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
134 amdgpu_virt_disable_access_debugfs(adev);
135 return -EINVAL;
136 }
137 mutex_lock(&adev->grbm_idx_mutex);
138 amdgpu_gfx_select_se_sh(adev, se_bank,
139 sh_bank, instance_bank);
140 } else if (use_ring) {
141 mutex_lock(&adev->srbm_mutex);
142 amdgpu_gfx_select_me_pipe_q(adev, me, pipe, queue, vmid);
143 }
144
145 if (pm_pg_lock)
146 mutex_lock(&adev->pm.mutex);
147
148 while (size) {
149 uint32_t value;
150
151 if (read) {
152 value = RREG32(*pos >> 2);
153 r = put_user(value, (uint32_t *)buf);
154 } else {
155 r = get_user(value, (uint32_t *)buf);
156 if (!r)
157 amdgpu_mm_wreg_mmio_rlc(adev, *pos >> 2, value);
158 }
159 if (r) {
160 result = r;
161 goto end;
162 }
163
164 result += 4;
165 buf += 4;
166 *pos += 4;
167 size -= 4;
168 }
169
170end:
171 if (use_bank) {
172 amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
173 mutex_unlock(&adev->grbm_idx_mutex);
174 } else if (use_ring) {
175 amdgpu_gfx_select_me_pipe_q(adev, 0, 0, 0, 0);
176 mutex_unlock(&adev->srbm_mutex);
177 }
178
179 if (pm_pg_lock)
180 mutex_unlock(&adev->pm.mutex);
181
182 pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
183 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
184
185 amdgpu_virt_disable_access_debugfs(adev);
186 return result;
187}
188
189/*
190 * amdgpu_debugfs_regs_read - Callback for reading MMIO registers
191 */
192static ssize_t amdgpu_debugfs_regs_read(struct file *f, char __user *buf,
193 size_t size, loff_t *pos)
194{
195 return amdgpu_debugfs_process_reg_op(true, f, buf, size, pos);
196}
197
198/*
199 * amdgpu_debugfs_regs_write - Callback for writing MMIO registers
200 */
201static ssize_t amdgpu_debugfs_regs_write(struct file *f, const char __user *buf,
202 size_t size, loff_t *pos)
203{
204 return amdgpu_debugfs_process_reg_op(false, f, (char __user *)buf, size, pos);
205}
206
207static int amdgpu_debugfs_regs2_open(struct inode *inode, struct file *file)
208{
209 struct amdgpu_debugfs_regs2_data *rd;
210
211 rd = kzalloc(sizeof *rd, GFP_KERNEL);
212 if (!rd)
213 return -ENOMEM;
214 rd->adev = file_inode(file)->i_private;
215 file->private_data = rd;
216 mutex_init(&rd->lock);
217
218 return 0;
219}
220
221static int amdgpu_debugfs_regs2_release(struct inode *inode, struct file *file)
222{
223 struct amdgpu_debugfs_regs2_data *rd = file->private_data;
224 mutex_destroy(&rd->lock);
225 kfree(file->private_data);
226 return 0;
227}
228
229static ssize_t amdgpu_debugfs_regs2_op(struct file *f, char __user *buf, u32 offset, size_t size, int write_en)
230{
231 struct amdgpu_debugfs_regs2_data *rd = f->private_data;
232 struct amdgpu_device *adev = rd->adev;
233 ssize_t result = 0;
234 int r;
235 uint32_t value;
236
237 if (size & 0x3 || offset & 0x3)
238 return -EINVAL;
239
240 r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
241 if (r < 0) {
242 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
243 return r;
244 }
245
246 r = amdgpu_virt_enable_access_debugfs(adev);
247 if (r < 0) {
248 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
249 return r;
250 }
251
252 mutex_lock(&rd->lock);
253
254 if (rd->id.use_grbm) {
255 if ((rd->id.grbm.sh != 0xFFFFFFFF && rd->id.grbm.sh >= adev->gfx.config.max_sh_per_se) ||
256 (rd->id.grbm.se != 0xFFFFFFFF && rd->id.grbm.se >= adev->gfx.config.max_shader_engines)) {
257 pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
258 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
259 amdgpu_virt_disable_access_debugfs(adev);
260 mutex_unlock(&rd->lock);
261 return -EINVAL;
262 }
263 mutex_lock(&adev->grbm_idx_mutex);
264 amdgpu_gfx_select_se_sh(adev, rd->id.grbm.se,
265 rd->id.grbm.sh,
266 rd->id.grbm.instance);
267 }
268
269 if (rd->id.use_srbm) {
270 mutex_lock(&adev->srbm_mutex);
271 amdgpu_gfx_select_me_pipe_q(adev, rd->id.srbm.me, rd->id.srbm.pipe,
272 rd->id.srbm.queue, rd->id.srbm.vmid);
273 }
274
275 if (rd->id.pg_lock)
276 mutex_lock(&adev->pm.mutex);
277
278 while (size) {
279 if (!write_en) {
280 value = RREG32(offset >> 2);
281 r = put_user(value, (uint32_t *)buf);
282 } else {
283 r = get_user(value, (uint32_t *)buf);
284 if (!r)
285 amdgpu_mm_wreg_mmio_rlc(adev, offset >> 2, value);
286 }
287 if (r) {
288 result = r;
289 goto end;
290 }
291 offset += 4;
292 size -= 4;
293 result += 4;
294 buf += 4;
295 }
296end:
297 if (rd->id.use_grbm) {
298 amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
299 mutex_unlock(&adev->grbm_idx_mutex);
300 }
301
302 if (rd->id.use_srbm) {
303 amdgpu_gfx_select_me_pipe_q(adev, 0, 0, 0, 0);
304 mutex_unlock(&adev->srbm_mutex);
305 }
306
307 if (rd->id.pg_lock)
308 mutex_unlock(&adev->pm.mutex);
309
310 mutex_unlock(&rd->lock);
311
312 pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
313 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
314
315 amdgpu_virt_disable_access_debugfs(adev);
316 return result;
317}
318
319static long amdgpu_debugfs_regs2_ioctl(struct file *f, unsigned int cmd, unsigned long data)
320{
321 struct amdgpu_debugfs_regs2_data *rd = f->private_data;
322 int r;
323
324 switch (cmd) {
325 case AMDGPU_DEBUGFS_REGS2_IOC_SET_STATE:
326 mutex_lock(&rd->lock);
327 r = copy_from_user(&rd->id, (struct amdgpu_debugfs_regs2_iocdata *)data, sizeof rd->id);
328 mutex_unlock(&rd->lock);
329 return r ? -EINVAL : 0;
330 default:
331 return -EINVAL;
332 }
333 return 0;
334}
335
336static ssize_t amdgpu_debugfs_regs2_read(struct file *f, char __user *buf, size_t size, loff_t *pos)
337{
338 return amdgpu_debugfs_regs2_op(f, buf, *pos, size, 0);
339}
340
341static ssize_t amdgpu_debugfs_regs2_write(struct file *f, const char __user *buf, size_t size, loff_t *pos)
342{
343 return amdgpu_debugfs_regs2_op(f, (char __user *)buf, *pos, size, 1);
344}
345
346
347/**
348 * amdgpu_debugfs_regs_pcie_read - Read from a PCIE register
349 *
350 * @f: open file handle
351 * @buf: User buffer to store read data in
352 * @size: Number of bytes to read
353 * @pos: Offset to seek to
354 *
355 * The lower bits are the BYTE offset of the register to read. This
356 * allows reading multiple registers in a single call and having
357 * the returned size reflect that.
358 */
359static ssize_t amdgpu_debugfs_regs_pcie_read(struct file *f, char __user *buf,
360 size_t size, loff_t *pos)
361{
362 struct amdgpu_device *adev = file_inode(f)->i_private;
363 ssize_t result = 0;
364 int r;
365
366 if (size & 0x3 || *pos & 0x3)
367 return -EINVAL;
368
369 r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
370 if (r < 0) {
371 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
372 return r;
373 }
374
375 r = amdgpu_virt_enable_access_debugfs(adev);
376 if (r < 0) {
377 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
378 return r;
379 }
380
381 while (size) {
382 uint32_t value;
383
384 value = RREG32_PCIE(*pos);
385 r = put_user(value, (uint32_t *)buf);
386 if (r)
387 goto out;
388
389 result += 4;
390 buf += 4;
391 *pos += 4;
392 size -= 4;
393 }
394
395 r = result;
396out:
397 pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
398 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
399 amdgpu_virt_disable_access_debugfs(adev);
400 return r;
401}
402
403/**
404 * amdgpu_debugfs_regs_pcie_write - Write to a PCIE register
405 *
406 * @f: open file handle
407 * @buf: User buffer to write data from
408 * @size: Number of bytes to write
409 * @pos: Offset to seek to
410 *
411 * The lower bits are the BYTE offset of the register to write. This
412 * allows writing multiple registers in a single call and having
413 * the returned size reflect that.
414 */
415static ssize_t amdgpu_debugfs_regs_pcie_write(struct file *f, const char __user *buf,
416 size_t size, loff_t *pos)
417{
418 struct amdgpu_device *adev = file_inode(f)->i_private;
419 ssize_t result = 0;
420 int r;
421
422 if (size & 0x3 || *pos & 0x3)
423 return -EINVAL;
424
425 r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
426 if (r < 0) {
427 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
428 return r;
429 }
430
431 r = amdgpu_virt_enable_access_debugfs(adev);
432 if (r < 0) {
433 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
434 return r;
435 }
436
437 while (size) {
438 uint32_t value;
439
440 r = get_user(value, (uint32_t *)buf);
441 if (r)
442 goto out;
443
444 WREG32_PCIE(*pos, value);
445
446 result += 4;
447 buf += 4;
448 *pos += 4;
449 size -= 4;
450 }
451
452 r = result;
453out:
454 pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
455 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
456 amdgpu_virt_disable_access_debugfs(adev);
457 return r;
458}
459
460/**
461 * amdgpu_debugfs_regs_didt_read - Read from a DIDT register
462 *
463 * @f: open file handle
464 * @buf: User buffer to store read data in
465 * @size: Number of bytes to read
466 * @pos: Offset to seek to
467 *
468 * The lower bits are the BYTE offset of the register to read. This
469 * allows reading multiple registers in a single call and having
470 * the returned size reflect that.
471 */
472static ssize_t amdgpu_debugfs_regs_didt_read(struct file *f, char __user *buf,
473 size_t size, loff_t *pos)
474{
475 struct amdgpu_device *adev = file_inode(f)->i_private;
476 ssize_t result = 0;
477 int r;
478
479 if (size & 0x3 || *pos & 0x3)
480 return -EINVAL;
481
482 r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
483 if (r < 0) {
484 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
485 return r;
486 }
487
488 r = amdgpu_virt_enable_access_debugfs(adev);
489 if (r < 0) {
490 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
491 return r;
492 }
493
494 while (size) {
495 uint32_t value;
496
497 value = RREG32_DIDT(*pos >> 2);
498 r = put_user(value, (uint32_t *)buf);
499 if (r)
500 goto out;
501
502 result += 4;
503 buf += 4;
504 *pos += 4;
505 size -= 4;
506 }
507
508 r = result;
509out:
510 pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
511 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
512 amdgpu_virt_disable_access_debugfs(adev);
513 return r;
514}
515
516/**
517 * amdgpu_debugfs_regs_didt_write - Write to a DIDT register
518 *
519 * @f: open file handle
520 * @buf: User buffer to write data from
521 * @size: Number of bytes to write
522 * @pos: Offset to seek to
523 *
524 * The lower bits are the BYTE offset of the register to write. This
525 * allows writing multiple registers in a single call and having
526 * the returned size reflect that.
527 */
528static ssize_t amdgpu_debugfs_regs_didt_write(struct file *f, const char __user *buf,
529 size_t size, loff_t *pos)
530{
531 struct amdgpu_device *adev = file_inode(f)->i_private;
532 ssize_t result = 0;
533 int r;
534
535 if (size & 0x3 || *pos & 0x3)
536 return -EINVAL;
537
538 r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
539 if (r < 0) {
540 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
541 return r;
542 }
543
544 r = amdgpu_virt_enable_access_debugfs(adev);
545 if (r < 0) {
546 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
547 return r;
548 }
549
550 while (size) {
551 uint32_t value;
552
553 r = get_user(value, (uint32_t *)buf);
554 if (r)
555 goto out;
556
557 WREG32_DIDT(*pos >> 2, value);
558
559 result += 4;
560 buf += 4;
561 *pos += 4;
562 size -= 4;
563 }
564
565 r = result;
566out:
567 pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
568 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
569 amdgpu_virt_disable_access_debugfs(adev);
570 return r;
571}
572
573/**
574 * amdgpu_debugfs_regs_smc_read - Read from a SMC register
575 *
576 * @f: open file handle
577 * @buf: User buffer to store read data in
578 * @size: Number of bytes to read
579 * @pos: Offset to seek to
580 *
581 * The lower bits are the BYTE offset of the register to read. This
582 * allows reading multiple registers in a single call and having
583 * the returned size reflect that.
584 */
585static ssize_t amdgpu_debugfs_regs_smc_read(struct file *f, char __user *buf,
586 size_t size, loff_t *pos)
587{
588 struct amdgpu_device *adev = file_inode(f)->i_private;
589 ssize_t result = 0;
590 int r;
591
592 if (size & 0x3 || *pos & 0x3)
593 return -EINVAL;
594
595 r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
596 if (r < 0) {
597 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
598 return r;
599 }
600
601 r = amdgpu_virt_enable_access_debugfs(adev);
602 if (r < 0) {
603 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
604 return r;
605 }
606
607 while (size) {
608 uint32_t value;
609
610 value = RREG32_SMC(*pos);
611 r = put_user(value, (uint32_t *)buf);
612 if (r)
613 goto out;
614
615 result += 4;
616 buf += 4;
617 *pos += 4;
618 size -= 4;
619 }
620
621 r = result;
622out:
623 pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
624 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
625 amdgpu_virt_disable_access_debugfs(adev);
626 return r;
627}
628
629/**
630 * amdgpu_debugfs_regs_smc_write - Write to a SMC register
631 *
632 * @f: open file handle
633 * @buf: User buffer to write data from
634 * @size: Number of bytes to write
635 * @pos: Offset to seek to
636 *
637 * The lower bits are the BYTE offset of the register to write. This
638 * allows writing multiple registers in a single call and having
639 * the returned size reflect that.
640 */
641static ssize_t amdgpu_debugfs_regs_smc_write(struct file *f, const char __user *buf,
642 size_t size, loff_t *pos)
643{
644 struct amdgpu_device *adev = file_inode(f)->i_private;
645 ssize_t result = 0;
646 int r;
647
648 if (size & 0x3 || *pos & 0x3)
649 return -EINVAL;
650
651 r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
652 if (r < 0) {
653 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
654 return r;
655 }
656
657 r = amdgpu_virt_enable_access_debugfs(adev);
658 if (r < 0) {
659 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
660 return r;
661 }
662
663 while (size) {
664 uint32_t value;
665
666 r = get_user(value, (uint32_t *)buf);
667 if (r)
668 goto out;
669
670 WREG32_SMC(*pos, value);
671
672 result += 4;
673 buf += 4;
674 *pos += 4;
675 size -= 4;
676 }
677
678 r = result;
679out:
680 pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
681 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
682 amdgpu_virt_disable_access_debugfs(adev);
683 return r;
684}
685
686/**
687 * amdgpu_debugfs_gca_config_read - Read from gfx config data
688 *
689 * @f: open file handle
690 * @buf: User buffer to store read data in
691 * @size: Number of bytes to read
692 * @pos: Offset to seek to
693 *
694 * This file is used to access configuration data in a somewhat
695 * stable fashion. The format is a series of DWORDs with the first
696 * indicating which revision it is. New content is appended to the
697 * end so that older software can still read the data.
698 */
699
700static ssize_t amdgpu_debugfs_gca_config_read(struct file *f, char __user *buf,
701 size_t size, loff_t *pos)
702{
703 struct amdgpu_device *adev = file_inode(f)->i_private;
704 ssize_t result = 0;
705 int r;
706 uint32_t *config, no_regs = 0;
707
708 if (size & 0x3 || *pos & 0x3)
709 return -EINVAL;
710
711 config = kmalloc_array(256, sizeof(*config), GFP_KERNEL);
712 if (!config)
713 return -ENOMEM;
714
715 /* version, increment each time something is added */
716 config[no_regs++] = 5;
717 config[no_regs++] = adev->gfx.config.max_shader_engines;
718 config[no_regs++] = adev->gfx.config.max_tile_pipes;
719 config[no_regs++] = adev->gfx.config.max_cu_per_sh;
720 config[no_regs++] = adev->gfx.config.max_sh_per_se;
721 config[no_regs++] = adev->gfx.config.max_backends_per_se;
722 config[no_regs++] = adev->gfx.config.max_texture_channel_caches;
723 config[no_regs++] = adev->gfx.config.max_gprs;
724 config[no_regs++] = adev->gfx.config.max_gs_threads;
725 config[no_regs++] = adev->gfx.config.max_hw_contexts;
726 config[no_regs++] = adev->gfx.config.sc_prim_fifo_size_frontend;
727 config[no_regs++] = adev->gfx.config.sc_prim_fifo_size_backend;
728 config[no_regs++] = adev->gfx.config.sc_hiz_tile_fifo_size;
729 config[no_regs++] = adev->gfx.config.sc_earlyz_tile_fifo_size;
730 config[no_regs++] = adev->gfx.config.num_tile_pipes;
731 config[no_regs++] = adev->gfx.config.backend_enable_mask;
732 config[no_regs++] = adev->gfx.config.mem_max_burst_length_bytes;
733 config[no_regs++] = adev->gfx.config.mem_row_size_in_kb;
734 config[no_regs++] = adev->gfx.config.shader_engine_tile_size;
735 config[no_regs++] = adev->gfx.config.num_gpus;
736 config[no_regs++] = adev->gfx.config.multi_gpu_tile_size;
737 config[no_regs++] = adev->gfx.config.mc_arb_ramcfg;
738 config[no_regs++] = adev->gfx.config.gb_addr_config;
739 config[no_regs++] = adev->gfx.config.num_rbs;
740
741 /* rev==1 */
742 config[no_regs++] = adev->rev_id;
743 config[no_regs++] = lower_32_bits(adev->pg_flags);
744 config[no_regs++] = lower_32_bits(adev->cg_flags);
745
746 /* rev==2 */
747 config[no_regs++] = adev->family;
748 config[no_regs++] = adev->external_rev_id;
749
750 /* rev==3 */
751 config[no_regs++] = adev->pdev->device;
752 config[no_regs++] = adev->pdev->revision;
753 config[no_regs++] = adev->pdev->subsystem_device;
754 config[no_regs++] = adev->pdev->subsystem_vendor;
755
756 /* rev==4 APU flag */
757 config[no_regs++] = adev->flags & AMD_IS_APU ? 1 : 0;
758
759 /* rev==5 PG/CG flag upper 32bit */
760 config[no_regs++] = upper_32_bits(adev->pg_flags);
761 config[no_regs++] = upper_32_bits(adev->cg_flags);
762
763 while (size && (*pos < no_regs * 4)) {
764 uint32_t value;
765
766 value = config[*pos >> 2];
767 r = put_user(value, (uint32_t *)buf);
768 if (r) {
769 kfree(config);
770 return r;
771 }
772
773 result += 4;
774 buf += 4;
775 *pos += 4;
776 size -= 4;
777 }
778
779 kfree(config);
780 return result;
781}
782
783/**
784 * amdgpu_debugfs_sensor_read - Read from the powerplay sensors
785 *
786 * @f: open file handle
787 * @buf: User buffer to store read data in
788 * @size: Number of bytes to read
789 * @pos: Offset to seek to
790 *
791 * The offset is treated as the BYTE address of one of the sensors
792 * enumerated in amd/include/kgd_pp_interface.h under the
793 * 'amd_pp_sensors' enumeration. For instance to read the UVD VCLK
794 * you would use the offset 3 * 4 = 12.
795 */
796static ssize_t amdgpu_debugfs_sensor_read(struct file *f, char __user *buf,
797 size_t size, loff_t *pos)
798{
799 struct amdgpu_device *adev = file_inode(f)->i_private;
800 int idx, x, outsize, r, valuesize;
801 uint32_t values[16];
802
803 if (size & 3 || *pos & 0x3)
804 return -EINVAL;
805
806 if (!adev->pm.dpm_enabled)
807 return -EINVAL;
808
809 /* convert offset to sensor number */
810 idx = *pos >> 2;
811
812 valuesize = sizeof(values);
813
814 r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
815 if (r < 0) {
816 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
817 return r;
818 }
819
820 r = amdgpu_virt_enable_access_debugfs(adev);
821 if (r < 0) {
822 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
823 return r;
824 }
825
826 r = amdgpu_dpm_read_sensor(adev, idx, &values[0], &valuesize);
827
828 pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
829 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
830
831 if (r) {
832 amdgpu_virt_disable_access_debugfs(adev);
833 return r;
834 }
835
836 if (size > valuesize) {
837 amdgpu_virt_disable_access_debugfs(adev);
838 return -EINVAL;
839 }
840
841 outsize = 0;
842 x = 0;
843 if (!r) {
844 while (size) {
845 r = put_user(values[x++], (int32_t *)buf);
846 buf += 4;
847 size -= 4;
848 outsize += 4;
849 }
850 }
851
852 amdgpu_virt_disable_access_debugfs(adev);
853 return !r ? outsize : r;
854}
855
856/** amdgpu_debugfs_wave_read - Read WAVE STATUS data
857 *
858 * @f: open file handle
859 * @buf: User buffer to store read data in
860 * @size: Number of bytes to read
861 * @pos: Offset to seek to
862 *
863 * The offset being sought changes which wave that the status data
864 * will be returned for. The bits are used as follows:
865 *
866 * Bits 0..6: Byte offset into data
867 * Bits 7..14: SE selector
868 * Bits 15..22: SH/SA selector
869 * Bits 23..30: CU/{WGP+SIMD} selector
870 * Bits 31..36: WAVE ID selector
871 * Bits 37..44: SIMD ID selector
872 *
873 * The returned data begins with one DWORD of version information
874 * Followed by WAVE STATUS registers relevant to the GFX IP version
875 * being used. See gfx_v8_0_read_wave_data() for an example output.
876 */
877static ssize_t amdgpu_debugfs_wave_read(struct file *f, char __user *buf,
878 size_t size, loff_t *pos)
879{
880 struct amdgpu_device *adev = f->f_inode->i_private;
881 int r, x;
882 ssize_t result = 0;
883 uint32_t offset, se, sh, cu, wave, simd, data[32];
884
885 if (size & 3 || *pos & 3)
886 return -EINVAL;
887
888 /* decode offset */
889 offset = (*pos & GENMASK_ULL(6, 0));
890 se = (*pos & GENMASK_ULL(14, 7)) >> 7;
891 sh = (*pos & GENMASK_ULL(22, 15)) >> 15;
892 cu = (*pos & GENMASK_ULL(30, 23)) >> 23;
893 wave = (*pos & GENMASK_ULL(36, 31)) >> 31;
894 simd = (*pos & GENMASK_ULL(44, 37)) >> 37;
895
896 r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
897 if (r < 0) {
898 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
899 return r;
900 }
901
902 r = amdgpu_virt_enable_access_debugfs(adev);
903 if (r < 0) {
904 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
905 return r;
906 }
907
908 /* switch to the specific se/sh/cu */
909 mutex_lock(&adev->grbm_idx_mutex);
910 amdgpu_gfx_select_se_sh(adev, se, sh, cu);
911
912 x = 0;
913 if (adev->gfx.funcs->read_wave_data)
914 adev->gfx.funcs->read_wave_data(adev, simd, wave, data, &x);
915
916 amdgpu_gfx_select_se_sh(adev, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF);
917 mutex_unlock(&adev->grbm_idx_mutex);
918
919 pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
920 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
921
922 if (!x) {
923 amdgpu_virt_disable_access_debugfs(adev);
924 return -EINVAL;
925 }
926
927 while (size && (offset < x * 4)) {
928 uint32_t value;
929
930 value = data[offset >> 2];
931 r = put_user(value, (uint32_t *)buf);
932 if (r) {
933 amdgpu_virt_disable_access_debugfs(adev);
934 return r;
935 }
936
937 result += 4;
938 buf += 4;
939 offset += 4;
940 size -= 4;
941 }
942
943 amdgpu_virt_disable_access_debugfs(adev);
944 return result;
945}
946
947/** amdgpu_debugfs_gpr_read - Read wave gprs
948 *
949 * @f: open file handle
950 * @buf: User buffer to store read data in
951 * @size: Number of bytes to read
952 * @pos: Offset to seek to
953 *
954 * The offset being sought changes which wave that the status data
955 * will be returned for. The bits are used as follows:
956 *
957 * Bits 0..11: Byte offset into data
958 * Bits 12..19: SE selector
959 * Bits 20..27: SH/SA selector
960 * Bits 28..35: CU/{WGP+SIMD} selector
961 * Bits 36..43: WAVE ID selector
962 * Bits 37..44: SIMD ID selector
963 * Bits 52..59: Thread selector
964 * Bits 60..61: Bank selector (VGPR=0,SGPR=1)
965 *
966 * The return data comes from the SGPR or VGPR register bank for
967 * the selected operational unit.
968 */
969static ssize_t amdgpu_debugfs_gpr_read(struct file *f, char __user *buf,
970 size_t size, loff_t *pos)
971{
972 struct amdgpu_device *adev = f->f_inode->i_private;
973 int r;
974 ssize_t result = 0;
975 uint32_t offset, se, sh, cu, wave, simd, thread, bank, *data;
976
977 if (size > 4096 || size & 3 || *pos & 3)
978 return -EINVAL;
979
980 /* decode offset */
981 offset = (*pos & GENMASK_ULL(11, 0)) >> 2;
982 se = (*pos & GENMASK_ULL(19, 12)) >> 12;
983 sh = (*pos & GENMASK_ULL(27, 20)) >> 20;
984 cu = (*pos & GENMASK_ULL(35, 28)) >> 28;
985 wave = (*pos & GENMASK_ULL(43, 36)) >> 36;
986 simd = (*pos & GENMASK_ULL(51, 44)) >> 44;
987 thread = (*pos & GENMASK_ULL(59, 52)) >> 52;
988 bank = (*pos & GENMASK_ULL(61, 60)) >> 60;
989
990 data = kcalloc(1024, sizeof(*data), GFP_KERNEL);
991 if (!data)
992 return -ENOMEM;
993
994 r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
995 if (r < 0)
996 goto err;
997
998 r = amdgpu_virt_enable_access_debugfs(adev);
999 if (r < 0)
1000 goto err;
1001
1002 /* switch to the specific se/sh/cu */
1003 mutex_lock(&adev->grbm_idx_mutex);
1004 amdgpu_gfx_select_se_sh(adev, se, sh, cu);
1005
1006 if (bank == 0) {
1007 if (adev->gfx.funcs->read_wave_vgprs)
1008 adev->gfx.funcs->read_wave_vgprs(adev, simd, wave, thread, offset, size>>2, data);
1009 } else {
1010 if (adev->gfx.funcs->read_wave_sgprs)
1011 adev->gfx.funcs->read_wave_sgprs(adev, simd, wave, offset, size>>2, data);
1012 }
1013
1014 amdgpu_gfx_select_se_sh(adev, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF);
1015 mutex_unlock(&adev->grbm_idx_mutex);
1016
1017 pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
1018 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
1019
1020 while (size) {
1021 uint32_t value;
1022
1023 value = data[result >> 2];
1024 r = put_user(value, (uint32_t *)buf);
1025 if (r) {
1026 amdgpu_virt_disable_access_debugfs(adev);
1027 goto err;
1028 }
1029
1030 result += 4;
1031 buf += 4;
1032 size -= 4;
1033 }
1034
1035 kfree(data);
1036 amdgpu_virt_disable_access_debugfs(adev);
1037 return result;
1038
1039err:
1040 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
1041 kfree(data);
1042 return r;
1043}
1044
1045/**
1046 * amdgpu_debugfs_gfxoff_residency_read - Read GFXOFF residency
1047 *
1048 * @f: open file handle
1049 * @buf: User buffer to store read data in
1050 * @size: Number of bytes to read
1051 * @pos: Offset to seek to
1052 *
1053 * Read the last residency value logged. It doesn't auto update, one needs to
1054 * stop logging before getting the current value.
1055 */
1056static ssize_t amdgpu_debugfs_gfxoff_residency_read(struct file *f, char __user *buf,
1057 size_t size, loff_t *pos)
1058{
1059 struct amdgpu_device *adev = file_inode(f)->i_private;
1060 ssize_t result = 0;
1061 int r;
1062
1063 if (size & 0x3 || *pos & 0x3)
1064 return -EINVAL;
1065
1066 r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
1067 if (r < 0) {
1068 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
1069 return r;
1070 }
1071
1072 while (size) {
1073 uint32_t value;
1074
1075 r = amdgpu_get_gfx_off_residency(adev, &value);
1076 if (r)
1077 goto out;
1078
1079 r = put_user(value, (uint32_t *)buf);
1080 if (r)
1081 goto out;
1082
1083 result += 4;
1084 buf += 4;
1085 *pos += 4;
1086 size -= 4;
1087 }
1088
1089 r = result;
1090out:
1091 pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
1092 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
1093
1094 return r;
1095}
1096
1097/**
1098 * amdgpu_debugfs_gfxoff_residency_write - Log GFXOFF Residency
1099 *
1100 * @f: open file handle
1101 * @buf: User buffer to write data from
1102 * @size: Number of bytes to write
1103 * @pos: Offset to seek to
1104 *
1105 * Write a 32-bit non-zero to start logging; write a 32-bit zero to stop
1106 */
1107static ssize_t amdgpu_debugfs_gfxoff_residency_write(struct file *f, const char __user *buf,
1108 size_t size, loff_t *pos)
1109{
1110 struct amdgpu_device *adev = file_inode(f)->i_private;
1111 ssize_t result = 0;
1112 int r;
1113
1114 if (size & 0x3 || *pos & 0x3)
1115 return -EINVAL;
1116
1117 r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
1118 if (r < 0) {
1119 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
1120 return r;
1121 }
1122
1123 while (size) {
1124 u32 value;
1125
1126 r = get_user(value, (uint32_t *)buf);
1127 if (r)
1128 goto out;
1129
1130 amdgpu_set_gfx_off_residency(adev, value ? true : false);
1131
1132 result += 4;
1133 buf += 4;
1134 *pos += 4;
1135 size -= 4;
1136 }
1137
1138 r = result;
1139out:
1140 pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
1141 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
1142
1143 return r;
1144}
1145
1146
1147/**
1148 * amdgpu_debugfs_gfxoff_count_read - Read GFXOFF entry count
1149 *
1150 * @f: open file handle
1151 * @buf: User buffer to store read data in
1152 * @size: Number of bytes to read
1153 * @pos: Offset to seek to
1154 */
1155static ssize_t amdgpu_debugfs_gfxoff_count_read(struct file *f, char __user *buf,
1156 size_t size, loff_t *pos)
1157{
1158 struct amdgpu_device *adev = file_inode(f)->i_private;
1159 ssize_t result = 0;
1160 int r;
1161
1162 if (size & 0x3 || *pos & 0x3)
1163 return -EINVAL;
1164
1165 r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
1166 if (r < 0) {
1167 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
1168 return r;
1169 }
1170
1171 while (size) {
1172 u64 value = 0;
1173
1174 r = amdgpu_get_gfx_off_entrycount(adev, &value);
1175 if (r)
1176 goto out;
1177
1178 r = put_user(value, (u64 *)buf);
1179 if (r)
1180 goto out;
1181
1182 result += 4;
1183 buf += 4;
1184 *pos += 4;
1185 size -= 4;
1186 }
1187
1188 r = result;
1189out:
1190 pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
1191 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
1192
1193 return r;
1194}
1195
1196/**
1197 * amdgpu_debugfs_gfxoff_write - Enable/disable GFXOFF
1198 *
1199 * @f: open file handle
1200 * @buf: User buffer to write data from
1201 * @size: Number of bytes to write
1202 * @pos: Offset to seek to
1203 *
1204 * Write a 32-bit zero to disable or a 32-bit non-zero to enable
1205 */
1206static ssize_t amdgpu_debugfs_gfxoff_write(struct file *f, const char __user *buf,
1207 size_t size, loff_t *pos)
1208{
1209 struct amdgpu_device *adev = file_inode(f)->i_private;
1210 ssize_t result = 0;
1211 int r;
1212
1213 if (size & 0x3 || *pos & 0x3)
1214 return -EINVAL;
1215
1216 r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
1217 if (r < 0) {
1218 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
1219 return r;
1220 }
1221
1222 while (size) {
1223 uint32_t value;
1224
1225 r = get_user(value, (uint32_t *)buf);
1226 if (r)
1227 goto out;
1228
1229 amdgpu_gfx_off_ctrl(adev, value ? true : false);
1230
1231 result += 4;
1232 buf += 4;
1233 *pos += 4;
1234 size -= 4;
1235 }
1236
1237 r = result;
1238out:
1239 pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
1240 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
1241
1242 return r;
1243}
1244
1245
1246/**
1247 * amdgpu_debugfs_gfxoff_read - read gfxoff status
1248 *
1249 * @f: open file handle
1250 * @buf: User buffer to store read data in
1251 * @size: Number of bytes to read
1252 * @pos: Offset to seek to
1253 */
1254static ssize_t amdgpu_debugfs_gfxoff_read(struct file *f, char __user *buf,
1255 size_t size, loff_t *pos)
1256{
1257 struct amdgpu_device *adev = file_inode(f)->i_private;
1258 ssize_t result = 0;
1259 int r;
1260
1261 if (size & 0x3 || *pos & 0x3)
1262 return -EINVAL;
1263
1264 r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
1265 if (r < 0) {
1266 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
1267 return r;
1268 }
1269
1270 while (size) {
1271 u32 value = adev->gfx.gfx_off_state;
1272
1273 r = put_user(value, (u32 *)buf);
1274 if (r)
1275 goto out;
1276
1277 result += 4;
1278 buf += 4;
1279 *pos += 4;
1280 size -= 4;
1281 }
1282
1283 r = result;
1284out:
1285 pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
1286 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
1287
1288 return r;
1289}
1290
1291static ssize_t amdgpu_debugfs_gfxoff_status_read(struct file *f, char __user *buf,
1292 size_t size, loff_t *pos)
1293{
1294 struct amdgpu_device *adev = file_inode(f)->i_private;
1295 ssize_t result = 0;
1296 int r;
1297
1298 if (size & 0x3 || *pos & 0x3)
1299 return -EINVAL;
1300
1301 r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
1302 if (r < 0) {
1303 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
1304 return r;
1305 }
1306
1307 while (size) {
1308 u32 value;
1309
1310 r = amdgpu_get_gfx_off_status(adev, &value);
1311 if (r)
1312 goto out;
1313
1314 r = put_user(value, (u32 *)buf);
1315 if (r)
1316 goto out;
1317
1318 result += 4;
1319 buf += 4;
1320 *pos += 4;
1321 size -= 4;
1322 }
1323
1324 r = result;
1325out:
1326 pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
1327 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
1328
1329 return r;
1330}
1331
1332static const struct file_operations amdgpu_debugfs_regs2_fops = {
1333 .owner = THIS_MODULE,
1334 .unlocked_ioctl = amdgpu_debugfs_regs2_ioctl,
1335 .read = amdgpu_debugfs_regs2_read,
1336 .write = amdgpu_debugfs_regs2_write,
1337 .open = amdgpu_debugfs_regs2_open,
1338 .release = amdgpu_debugfs_regs2_release,
1339 .llseek = default_llseek
1340};
1341
1342static const struct file_operations amdgpu_debugfs_regs_fops = {
1343 .owner = THIS_MODULE,
1344 .read = amdgpu_debugfs_regs_read,
1345 .write = amdgpu_debugfs_regs_write,
1346 .llseek = default_llseek
1347};
1348static const struct file_operations amdgpu_debugfs_regs_didt_fops = {
1349 .owner = THIS_MODULE,
1350 .read = amdgpu_debugfs_regs_didt_read,
1351 .write = amdgpu_debugfs_regs_didt_write,
1352 .llseek = default_llseek
1353};
1354static const struct file_operations amdgpu_debugfs_regs_pcie_fops = {
1355 .owner = THIS_MODULE,
1356 .read = amdgpu_debugfs_regs_pcie_read,
1357 .write = amdgpu_debugfs_regs_pcie_write,
1358 .llseek = default_llseek
1359};
1360static const struct file_operations amdgpu_debugfs_regs_smc_fops = {
1361 .owner = THIS_MODULE,
1362 .read = amdgpu_debugfs_regs_smc_read,
1363 .write = amdgpu_debugfs_regs_smc_write,
1364 .llseek = default_llseek
1365};
1366
1367static const struct file_operations amdgpu_debugfs_gca_config_fops = {
1368 .owner = THIS_MODULE,
1369 .read = amdgpu_debugfs_gca_config_read,
1370 .llseek = default_llseek
1371};
1372
1373static const struct file_operations amdgpu_debugfs_sensors_fops = {
1374 .owner = THIS_MODULE,
1375 .read = amdgpu_debugfs_sensor_read,
1376 .llseek = default_llseek
1377};
1378
1379static const struct file_operations amdgpu_debugfs_wave_fops = {
1380 .owner = THIS_MODULE,
1381 .read = amdgpu_debugfs_wave_read,
1382 .llseek = default_llseek
1383};
1384static const struct file_operations amdgpu_debugfs_gpr_fops = {
1385 .owner = THIS_MODULE,
1386 .read = amdgpu_debugfs_gpr_read,
1387 .llseek = default_llseek
1388};
1389
1390static const struct file_operations amdgpu_debugfs_gfxoff_fops = {
1391 .owner = THIS_MODULE,
1392 .read = amdgpu_debugfs_gfxoff_read,
1393 .write = amdgpu_debugfs_gfxoff_write,
1394 .llseek = default_llseek
1395};
1396
1397static const struct file_operations amdgpu_debugfs_gfxoff_status_fops = {
1398 .owner = THIS_MODULE,
1399 .read = amdgpu_debugfs_gfxoff_status_read,
1400 .llseek = default_llseek
1401};
1402
1403static const struct file_operations amdgpu_debugfs_gfxoff_count_fops = {
1404 .owner = THIS_MODULE,
1405 .read = amdgpu_debugfs_gfxoff_count_read,
1406 .llseek = default_llseek
1407};
1408
1409static const struct file_operations amdgpu_debugfs_gfxoff_residency_fops = {
1410 .owner = THIS_MODULE,
1411 .read = amdgpu_debugfs_gfxoff_residency_read,
1412 .write = amdgpu_debugfs_gfxoff_residency_write,
1413 .llseek = default_llseek
1414};
1415
1416static const struct file_operations *debugfs_regs[] = {
1417 &amdgpu_debugfs_regs_fops,
1418 &amdgpu_debugfs_regs2_fops,
1419 &amdgpu_debugfs_regs_didt_fops,
1420 &amdgpu_debugfs_regs_pcie_fops,
1421 &amdgpu_debugfs_regs_smc_fops,
1422 &amdgpu_debugfs_gca_config_fops,
1423 &amdgpu_debugfs_sensors_fops,
1424 &amdgpu_debugfs_wave_fops,
1425 &amdgpu_debugfs_gpr_fops,
1426 &amdgpu_debugfs_gfxoff_fops,
1427 &amdgpu_debugfs_gfxoff_status_fops,
1428 &amdgpu_debugfs_gfxoff_count_fops,
1429 &amdgpu_debugfs_gfxoff_residency_fops,
1430};
1431
1432static const char *debugfs_regs_names[] = {
1433 "amdgpu_regs",
1434 "amdgpu_regs2",
1435 "amdgpu_regs_didt",
1436 "amdgpu_regs_pcie",
1437 "amdgpu_regs_smc",
1438 "amdgpu_gca_config",
1439 "amdgpu_sensors",
1440 "amdgpu_wave",
1441 "amdgpu_gpr",
1442 "amdgpu_gfxoff",
1443 "amdgpu_gfxoff_status",
1444 "amdgpu_gfxoff_count",
1445 "amdgpu_gfxoff_residency",
1446};
1447
1448/**
1449 * amdgpu_debugfs_regs_init - Initialize debugfs entries that provide
1450 * register access.
1451 *
1452 * @adev: The device to attach the debugfs entries to
1453 */
1454int amdgpu_debugfs_regs_init(struct amdgpu_device *adev)
1455{
1456 struct drm_minor *minor = adev_to_drm(adev)->primary;
1457 struct dentry *ent, *root = minor->debugfs_root;
1458 unsigned int i;
1459
1460 for (i = 0; i < ARRAY_SIZE(debugfs_regs); i++) {
1461 ent = debugfs_create_file(debugfs_regs_names[i],
1462 S_IFREG | S_IRUGO, root,
1463 adev, debugfs_regs[i]);
1464 if (!i && !IS_ERR_OR_NULL(ent))
1465 i_size_write(ent->d_inode, adev->rmmio_size);
1466 }
1467
1468 return 0;
1469}
1470
1471static int amdgpu_debugfs_test_ib_show(struct seq_file *m, void *unused)
1472{
1473 struct amdgpu_device *adev = (struct amdgpu_device *)m->private;
1474 struct drm_device *dev = adev_to_drm(adev);
1475 int r = 0, i;
1476
1477 r = pm_runtime_get_sync(dev->dev);
1478 if (r < 0) {
1479 pm_runtime_put_autosuspend(dev->dev);
1480 return r;
1481 }
1482
1483 /* Avoid accidently unparking the sched thread during GPU reset */
1484 r = down_write_killable(&adev->reset_domain->sem);
1485 if (r)
1486 return r;
1487
1488 /* hold on the scheduler */
1489 for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
1490 struct amdgpu_ring *ring = adev->rings[i];
1491
1492 if (!ring || !ring->sched.thread)
1493 continue;
1494 kthread_park(ring->sched.thread);
1495 }
1496
1497 seq_printf(m, "run ib test:\n");
1498 r = amdgpu_ib_ring_tests(adev);
1499 if (r)
1500 seq_printf(m, "ib ring tests failed (%d).\n", r);
1501 else
1502 seq_printf(m, "ib ring tests passed.\n");
1503
1504 /* go on the scheduler */
1505 for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
1506 struct amdgpu_ring *ring = adev->rings[i];
1507
1508 if (!ring || !ring->sched.thread)
1509 continue;
1510 kthread_unpark(ring->sched.thread);
1511 }
1512
1513 up_write(&adev->reset_domain->sem);
1514
1515 pm_runtime_mark_last_busy(dev->dev);
1516 pm_runtime_put_autosuspend(dev->dev);
1517
1518 return 0;
1519}
1520
1521static int amdgpu_debugfs_evict_vram(void *data, u64 *val)
1522{
1523 struct amdgpu_device *adev = (struct amdgpu_device *)data;
1524 struct drm_device *dev = adev_to_drm(adev);
1525 int r;
1526
1527 r = pm_runtime_get_sync(dev->dev);
1528 if (r < 0) {
1529 pm_runtime_put_autosuspend(dev->dev);
1530 return r;
1531 }
1532
1533 *val = amdgpu_ttm_evict_resources(adev, TTM_PL_VRAM);
1534
1535 pm_runtime_mark_last_busy(dev->dev);
1536 pm_runtime_put_autosuspend(dev->dev);
1537
1538 return 0;
1539}
1540
1541
1542static int amdgpu_debugfs_evict_gtt(void *data, u64 *val)
1543{
1544 struct amdgpu_device *adev = (struct amdgpu_device *)data;
1545 struct drm_device *dev = adev_to_drm(adev);
1546 int r;
1547
1548 r = pm_runtime_get_sync(dev->dev);
1549 if (r < 0) {
1550 pm_runtime_put_autosuspend(dev->dev);
1551 return r;
1552 }
1553
1554 *val = amdgpu_ttm_evict_resources(adev, TTM_PL_TT);
1555
1556 pm_runtime_mark_last_busy(dev->dev);
1557 pm_runtime_put_autosuspend(dev->dev);
1558
1559 return 0;
1560}
1561
1562static int amdgpu_debugfs_benchmark(void *data, u64 val)
1563{
1564 struct amdgpu_device *adev = (struct amdgpu_device *)data;
1565 struct drm_device *dev = adev_to_drm(adev);
1566 int r;
1567
1568 r = pm_runtime_get_sync(dev->dev);
1569 if (r < 0) {
1570 pm_runtime_put_autosuspend(dev->dev);
1571 return r;
1572 }
1573
1574 r = amdgpu_benchmark(adev, val);
1575
1576 pm_runtime_mark_last_busy(dev->dev);
1577 pm_runtime_put_autosuspend(dev->dev);
1578
1579 return r;
1580}
1581
1582static int amdgpu_debugfs_vm_info_show(struct seq_file *m, void *unused)
1583{
1584 struct amdgpu_device *adev = (struct amdgpu_device *)m->private;
1585 struct drm_device *dev = adev_to_drm(adev);
1586 struct drm_file *file;
1587 int r;
1588
1589 r = mutex_lock_interruptible(&dev->filelist_mutex);
1590 if (r)
1591 return r;
1592
1593 list_for_each_entry(file, &dev->filelist, lhead) {
1594 struct amdgpu_fpriv *fpriv = file->driver_priv;
1595 struct amdgpu_vm *vm = &fpriv->vm;
1596
1597 seq_printf(m, "pid:%d\tProcess:%s ----------\n",
1598 vm->task_info.pid, vm->task_info.process_name);
1599 r = amdgpu_bo_reserve(vm->root.bo, true);
1600 if (r)
1601 break;
1602 amdgpu_debugfs_vm_bo_info(vm, m);
1603 amdgpu_bo_unreserve(vm->root.bo);
1604 }
1605
1606 mutex_unlock(&dev->filelist_mutex);
1607
1608 return r;
1609}
1610
1611DEFINE_SHOW_ATTRIBUTE(amdgpu_debugfs_test_ib);
1612DEFINE_SHOW_ATTRIBUTE(amdgpu_debugfs_vm_info);
1613DEFINE_DEBUGFS_ATTRIBUTE(amdgpu_evict_vram_fops, amdgpu_debugfs_evict_vram,
1614 NULL, "%lld\n");
1615DEFINE_DEBUGFS_ATTRIBUTE(amdgpu_evict_gtt_fops, amdgpu_debugfs_evict_gtt,
1616 NULL, "%lld\n");
1617DEFINE_DEBUGFS_ATTRIBUTE(amdgpu_benchmark_fops, NULL, amdgpu_debugfs_benchmark,
1618 "%lld\n");
1619
1620static void amdgpu_ib_preempt_fences_swap(struct amdgpu_ring *ring,
1621 struct dma_fence **fences)
1622{
1623 struct amdgpu_fence_driver *drv = &ring->fence_drv;
1624 uint32_t sync_seq, last_seq;
1625
1626 last_seq = atomic_read(&ring->fence_drv.last_seq);
1627 sync_seq = ring->fence_drv.sync_seq;
1628
1629 last_seq &= drv->num_fences_mask;
1630 sync_seq &= drv->num_fences_mask;
1631
1632 do {
1633 struct dma_fence *fence, **ptr;
1634
1635 ++last_seq;
1636 last_seq &= drv->num_fences_mask;
1637 ptr = &drv->fences[last_seq];
1638
1639 fence = rcu_dereference_protected(*ptr, 1);
1640 RCU_INIT_POINTER(*ptr, NULL);
1641
1642 if (!fence)
1643 continue;
1644
1645 fences[last_seq] = fence;
1646
1647 } while (last_seq != sync_seq);
1648}
1649
1650static void amdgpu_ib_preempt_signal_fences(struct dma_fence **fences,
1651 int length)
1652{
1653 int i;
1654 struct dma_fence *fence;
1655
1656 for (i = 0; i < length; i++) {
1657 fence = fences[i];
1658 if (!fence)
1659 continue;
1660 dma_fence_signal(fence);
1661 dma_fence_put(fence);
1662 }
1663}
1664
1665static void amdgpu_ib_preempt_job_recovery(struct drm_gpu_scheduler *sched)
1666{
1667 struct drm_sched_job *s_job;
1668 struct dma_fence *fence;
1669
1670 spin_lock(&sched->job_list_lock);
1671 list_for_each_entry(s_job, &sched->pending_list, list) {
1672 fence = sched->ops->run_job(s_job);
1673 dma_fence_put(fence);
1674 }
1675 spin_unlock(&sched->job_list_lock);
1676}
1677
1678static void amdgpu_ib_preempt_mark_partial_job(struct amdgpu_ring *ring)
1679{
1680 struct amdgpu_job *job;
1681 struct drm_sched_job *s_job, *tmp;
1682 uint32_t preempt_seq;
1683 struct dma_fence *fence, **ptr;
1684 struct amdgpu_fence_driver *drv = &ring->fence_drv;
1685 struct drm_gpu_scheduler *sched = &ring->sched;
1686 bool preempted = true;
1687
1688 if (ring->funcs->type != AMDGPU_RING_TYPE_GFX)
1689 return;
1690
1691 preempt_seq = le32_to_cpu(*(drv->cpu_addr + 2));
1692 if (preempt_seq <= atomic_read(&drv->last_seq)) {
1693 preempted = false;
1694 goto no_preempt;
1695 }
1696
1697 preempt_seq &= drv->num_fences_mask;
1698 ptr = &drv->fences[preempt_seq];
1699 fence = rcu_dereference_protected(*ptr, 1);
1700
1701no_preempt:
1702 spin_lock(&sched->job_list_lock);
1703 list_for_each_entry_safe(s_job, tmp, &sched->pending_list, list) {
1704 if (dma_fence_is_signaled(&s_job->s_fence->finished)) {
1705 /* remove job from ring_mirror_list */
1706 list_del_init(&s_job->list);
1707 sched->ops->free_job(s_job);
1708 continue;
1709 }
1710 job = to_amdgpu_job(s_job);
1711 if (preempted && (&job->hw_fence) == fence)
1712 /* mark the job as preempted */
1713 job->preemption_status |= AMDGPU_IB_PREEMPTED;
1714 }
1715 spin_unlock(&sched->job_list_lock);
1716}
1717
1718static int amdgpu_debugfs_ib_preempt(void *data, u64 val)
1719{
1720 int r, resched, length;
1721 struct amdgpu_ring *ring;
1722 struct dma_fence **fences = NULL;
1723 struct amdgpu_device *adev = (struct amdgpu_device *)data;
1724
1725 if (val >= AMDGPU_MAX_RINGS)
1726 return -EINVAL;
1727
1728 ring = adev->rings[val];
1729
1730 if (!ring || !ring->funcs->preempt_ib || !ring->sched.thread)
1731 return -EINVAL;
1732
1733 /* the last preemption failed */
1734 if (ring->trail_seq != le32_to_cpu(*ring->trail_fence_cpu_addr))
1735 return -EBUSY;
1736
1737 length = ring->fence_drv.num_fences_mask + 1;
1738 fences = kcalloc(length, sizeof(void *), GFP_KERNEL);
1739 if (!fences)
1740 return -ENOMEM;
1741
1742 /* Avoid accidently unparking the sched thread during GPU reset */
1743 r = down_read_killable(&adev->reset_domain->sem);
1744 if (r)
1745 goto pro_end;
1746
1747 /* stop the scheduler */
1748 kthread_park(ring->sched.thread);
1749
1750 resched = ttm_bo_lock_delayed_workqueue(&adev->mman.bdev);
1751
1752 /* preempt the IB */
1753 r = amdgpu_ring_preempt_ib(ring);
1754 if (r) {
1755 DRM_WARN("failed to preempt ring %d\n", ring->idx);
1756 goto failure;
1757 }
1758
1759 amdgpu_fence_process(ring);
1760
1761 if (atomic_read(&ring->fence_drv.last_seq) !=
1762 ring->fence_drv.sync_seq) {
1763 DRM_INFO("ring %d was preempted\n", ring->idx);
1764
1765 amdgpu_ib_preempt_mark_partial_job(ring);
1766
1767 /* swap out the old fences */
1768 amdgpu_ib_preempt_fences_swap(ring, fences);
1769
1770 amdgpu_fence_driver_force_completion(ring);
1771
1772 /* resubmit unfinished jobs */
1773 amdgpu_ib_preempt_job_recovery(&ring->sched);
1774
1775 /* wait for jobs finished */
1776 amdgpu_fence_wait_empty(ring);
1777
1778 /* signal the old fences */
1779 amdgpu_ib_preempt_signal_fences(fences, length);
1780 }
1781
1782failure:
1783 /* restart the scheduler */
1784 kthread_unpark(ring->sched.thread);
1785
1786 up_read(&adev->reset_domain->sem);
1787
1788 ttm_bo_unlock_delayed_workqueue(&adev->mman.bdev, resched);
1789
1790pro_end:
1791 kfree(fences);
1792
1793 return r;
1794}
1795
1796static int amdgpu_debugfs_sclk_set(void *data, u64 val)
1797{
1798 int ret = 0;
1799 uint32_t max_freq, min_freq;
1800 struct amdgpu_device *adev = (struct amdgpu_device *)data;
1801
1802 if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
1803 return -EINVAL;
1804
1805 ret = pm_runtime_get_sync(adev_to_drm(adev)->dev);
1806 if (ret < 0) {
1807 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
1808 return ret;
1809 }
1810
1811 ret = amdgpu_dpm_get_dpm_freq_range(adev, PP_SCLK, &min_freq, &max_freq);
1812 if (ret == -EOPNOTSUPP) {
1813 ret = 0;
1814 goto out;
1815 }
1816 if (ret || val > max_freq || val < min_freq) {
1817 ret = -EINVAL;
1818 goto out;
1819 }
1820
1821 ret = amdgpu_dpm_set_soft_freq_range(adev, PP_SCLK, (uint32_t)val, (uint32_t)val);
1822 if (ret)
1823 ret = -EINVAL;
1824
1825out:
1826 pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
1827 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
1828
1829 return ret;
1830}
1831
1832DEFINE_DEBUGFS_ATTRIBUTE(fops_ib_preempt, NULL,
1833 amdgpu_debugfs_ib_preempt, "%llu\n");
1834
1835DEFINE_DEBUGFS_ATTRIBUTE(fops_sclk_set, NULL,
1836 amdgpu_debugfs_sclk_set, "%llu\n");
1837
1838static ssize_t amdgpu_reset_dump_register_list_read(struct file *f,
1839 char __user *buf, size_t size, loff_t *pos)
1840{
1841 struct amdgpu_device *adev = (struct amdgpu_device *)file_inode(f)->i_private;
1842 char reg_offset[12];
1843 int i, ret, len = 0;
1844
1845 if (*pos)
1846 return 0;
1847
1848 memset(reg_offset, 0, 12);
1849 ret = down_read_killable(&adev->reset_domain->sem);
1850 if (ret)
1851 return ret;
1852
1853 for (i = 0; i < adev->num_regs; i++) {
1854 sprintf(reg_offset, "0x%x\n", adev->reset_dump_reg_list[i]);
1855 up_read(&adev->reset_domain->sem);
1856 if (copy_to_user(buf + len, reg_offset, strlen(reg_offset)))
1857 return -EFAULT;
1858
1859 len += strlen(reg_offset);
1860 ret = down_read_killable(&adev->reset_domain->sem);
1861 if (ret)
1862 return ret;
1863 }
1864
1865 up_read(&adev->reset_domain->sem);
1866 *pos += len;
1867
1868 return len;
1869}
1870
static ssize_t amdgpu_reset_dump_register_list_write(struct file *f,
				const char __user *buf, size_t size, loff_t *pos)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)file_inode(f)->i_private;
	char reg_offset[11];
	uint32_t *new = NULL, *tmp = NULL;
	int ret, i = 0, len = 0;

	do {
		memset(reg_offset, 0, 11);
		if (copy_from_user(reg_offset, buf + len,
				   min(10, ((int)size-len)))) {
			ret = -EFAULT;
			goto error_free;
		}

		new = krealloc_array(tmp, i + 1, sizeof(uint32_t), GFP_KERNEL);
		if (!new) {
			ret = -ENOMEM;
			goto error_free;
		}
		tmp = new;
		if (sscanf(reg_offset, "%X %n", &tmp[i], &ret) != 1) {
			ret = -EINVAL;
			goto error_free;
		}

		len += ret;
		i++;
	} while (len < size);

	new = kmalloc_array(i, sizeof(uint32_t), GFP_KERNEL);
	if (!new) {
		ret = -ENOMEM;
		goto error_free;
	}
	ret = down_write_killable(&adev->reset_domain->sem);
	if (ret)
		goto error_free;

	swap(adev->reset_dump_reg_list, tmp);
	swap(adev->reset_dump_reg_value, new);
	adev->num_regs = i;
	up_write(&adev->reset_domain->sem);
	ret = size;

error_free:
	if (tmp != new)
		kfree(tmp);
	kfree(new);
	return ret;
}

static const struct file_operations amdgpu_reset_dump_register_list = {
	.owner = THIS_MODULE,
	.read = amdgpu_reset_dump_register_list_read,
	.write = amdgpu_reset_dump_register_list_write,
	.llseek = default_llseek
};

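/**
 * amdgpu_debugfs_init - create the amdgpu debugfs entries
 *
 * @adev: amdgpu device pointer
 *
 * Registers the top-level amdgpu debugfs files and calls into the
 * per-subsystem helpers (TTM, PM, fences, rings, RAS, VCN firmware
 * logs, ...).  Returns 0 on success or a negative error code when a
 * required file cannot be created.
 */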
int amdgpu_debugfs_init(struct amdgpu_device *adev)
{
	struct dentry *root = adev_to_drm(adev)->primary->debugfs_root;
	struct dentry *ent;
	int r, i;

	if (!debugfs_initialized())
		return 0;

	debugfs_create_x32("amdgpu_smu_debug", 0600, root,
			   &adev->pm.smu_debug_mask);

	ent = debugfs_create_file("amdgpu_preempt_ib", 0600, root, adev,
				  &fops_ib_preempt);
	if (IS_ERR(ent)) {
		DRM_ERROR("unable to create amdgpu_preempt_ib debugfs file\n");
		return PTR_ERR(ent);
	}

	ent = debugfs_create_file("amdgpu_force_sclk", 0200, root, adev,
				  &fops_sclk_set);
	if (IS_ERR(ent)) {
		DRM_ERROR("unable to create amdgpu_force_sclk debugfs file\n");
		return PTR_ERR(ent);
	}

	/* Register debugfs entries for amdgpu_ttm */
	amdgpu_ttm_debugfs_init(adev);
	amdgpu_debugfs_pm_init(adev);
	amdgpu_debugfs_sa_init(adev);
	amdgpu_debugfs_fence_init(adev);
	amdgpu_debugfs_gem_init(adev);

	r = amdgpu_debugfs_regs_init(adev);
	if (r)
		DRM_ERROR("registering register debugfs failed (%d).\n", r);

	amdgpu_debugfs_firmware_init(adev);
	amdgpu_ta_if_debugfs_init(adev);

#if defined(CONFIG_DRM_AMD_DC)
	if (adev->dc_enabled)
		dtn_debugfs_init(adev);
#endif

	for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
		struct amdgpu_ring *ring = adev->rings[i];

		if (!ring)
			continue;

		amdgpu_debugfs_ring_init(adev, ring);
	}

	for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
		if (!amdgpu_vcnfw_log)
			break;

		if (adev->vcn.harvest_config & (1 << i))
			continue;

		amdgpu_debugfs_vcn_fwlog_init(adev, i, &adev->vcn.inst[i]);
	}

	amdgpu_ras_debugfs_create_all(adev);
	amdgpu_rap_debugfs_init(adev);
	amdgpu_securedisplay_debugfs_init(adev);
	amdgpu_fw_attestation_debugfs_init(adev);

	debugfs_create_file("amdgpu_evict_vram", 0444, root, adev,
			    &amdgpu_evict_vram_fops);
	debugfs_create_file("amdgpu_evict_gtt", 0444, root, adev,
			    &amdgpu_evict_gtt_fops);
	debugfs_create_file("amdgpu_test_ib", 0444, root, adev,
			    &amdgpu_debugfs_test_ib_fops);
	debugfs_create_file("amdgpu_vm_info", 0444, root, adev,
			    &amdgpu_debugfs_vm_info_fops);
	debugfs_create_file("amdgpu_benchmark", 0200, root, adev,
			    &amdgpu_benchmark_fops);
	debugfs_create_file("amdgpu_reset_dump_register_list", 0644, root, adev,
			    &amdgpu_reset_dump_register_list);

	adev->debugfs_vbios_blob.data = adev->bios;
	adev->debugfs_vbios_blob.size = adev->bios_size;
	debugfs_create_blob("amdgpu_vbios", 0444, root,
			    &adev->debugfs_vbios_blob);

	adev->debugfs_discovery_blob.data = adev->mman.discovery_bin;
	adev->debugfs_discovery_blob.size = adev->mman.discovery_tmr_size;
	debugfs_create_blob("amdgpu_discovery", 0444, root,
			    &adev->debugfs_discovery_blob);

	return 0;
}

#else
int amdgpu_debugfs_init(struct amdgpu_device *adev)
{
	return 0;
}
int amdgpu_debugfs_regs_init(struct amdgpu_device *adev)
{
	return 0;
}
#endif