1/*
2 * Copyright 2008 Advanced Micro Devices, Inc.
3 * Copyright 2008 Red Hat Inc.
4 * Copyright 2009 Jerome Glisse.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the "Software"),
8 * to deal in the Software without restriction, including without limitation
9 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10 * and/or sell copies of the Software, and to permit persons to whom the
11 * Software is furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice shall be included in
14 * all copies or substantial portions of the Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
20 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
21 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
22 * OTHER DEALINGS IN THE SOFTWARE.
23 *
24 * Authors: Dave Airlie
25 * Alex Deucher
26 * Jerome Glisse
27 */
28#ifndef __AMDGPU_H__
29#define __AMDGPU_H__
30
31#ifdef pr_fmt
32#undef pr_fmt
33#endif
34
35#define pr_fmt(fmt) "amdgpu: " fmt
36
37#ifdef dev_fmt
38#undef dev_fmt
39#endif
40
41#define dev_fmt(fmt) "amdgpu: " fmt
42
43#include "amdgpu_ctx.h"
44
45#include <linux/atomic.h>
46#include <linux/wait.h>
47#include <linux/list.h>
48#include <linux/kref.h>
49#include <linux/rbtree.h>
50#include <linux/hashtable.h>
51#include <linux/dma-fence.h>
52#include <linux/pci.h>
53
54#include <drm/ttm/ttm_bo.h>
55#include <drm/ttm/ttm_placement.h>
56
57#include <drm/amdgpu_drm.h>
58#include <drm/drm_gem.h>
59#include <drm/drm_ioctl.h>
60
61#include <kgd_kfd_interface.h>
62#include "dm_pp_interface.h"
63#include "kgd_pp_interface.h"
64
65#include "amd_shared.h"
66#include "amdgpu_mode.h"
67#include "amdgpu_ih.h"
68#include "amdgpu_irq.h"
69#include "amdgpu_ucode.h"
70#include "amdgpu_ttm.h"
71#include "amdgpu_psp.h"
72#include "amdgpu_gds.h"
73#include "amdgpu_sync.h"
74#include "amdgpu_ring.h"
75#include "amdgpu_vm.h"
76#include "amdgpu_dpm.h"
77#include "amdgpu_acp.h"
78#include "amdgpu_uvd.h"
79#include "amdgpu_vce.h"
80#include "amdgpu_vcn.h"
81#include "amdgpu_jpeg.h"
82#include "amdgpu_vpe.h"
83#include "amdgpu_umsch_mm.h"
84#include "amdgpu_gmc.h"
85#include "amdgpu_gfx.h"
86#include "amdgpu_sdma.h"
87#include "amdgpu_lsdma.h"
88#include "amdgpu_nbio.h"
89#include "amdgpu_hdp.h"
90#include "amdgpu_dm.h"
91#include "amdgpu_virt.h"
92#include "amdgpu_csa.h"
93#include "amdgpu_mes_ctx.h"
94#include "amdgpu_gart.h"
95#include "amdgpu_debugfs.h"
96#include "amdgpu_job.h"
97#include "amdgpu_bo_list.h"
98#include "amdgpu_gem.h"
99#include "amdgpu_doorbell.h"
100#include "amdgpu_amdkfd.h"
101#include "amdgpu_discovery.h"
102#include "amdgpu_mes.h"
103#include "amdgpu_umc.h"
104#include "amdgpu_mmhub.h"
105#include "amdgpu_gfxhub.h"
106#include "amdgpu_df.h"
107#include "amdgpu_smuio.h"
108#include "amdgpu_fdinfo.h"
109#include "amdgpu_mca.h"
110#include "amdgpu_aca.h"
111#include "amdgpu_ras.h"
112#include "amdgpu_xcp.h"
113#include "amdgpu_seq64.h"
114#include "amdgpu_reg_state.h"
115
116#define MAX_GPU_INSTANCE 64
117
118struct amdgpu_gpu_instance {
119 struct amdgpu_device *adev;
120 int mgpu_fan_enabled;
121};
122
123struct amdgpu_mgpu_info {
124 struct amdgpu_gpu_instance gpu_ins[MAX_GPU_INSTANCE];
125 struct mutex mutex;
126 uint32_t num_gpu;
127 uint32_t num_dgpu;
128 uint32_t num_apu;
129
130 /* delayed reset_func for XGMI configuration if necessary */
131 struct delayed_work delayed_reset_work;
132 bool pending_reset;
133};
134
135enum amdgpu_ss {
136 AMDGPU_SS_DRV_LOAD,
137 AMDGPU_SS_DEV_D0,
138 AMDGPU_SS_DEV_D3,
139 AMDGPU_SS_DRV_UNLOAD
140};
141
142struct amdgpu_watchdog_timer {
143 bool timeout_fatal_disable;
144 uint32_t period; /* maxCycles = (1 << period), the number of cycles before a timeout */
145};
146
147#define AMDGPU_MAX_TIMEOUT_PARAM_LENGTH 256
148
149/*
 150 * Module parameters.
151 */
152extern int amdgpu_modeset;
153extern unsigned int amdgpu_vram_limit;
154extern int amdgpu_vis_vram_limit;
155extern int amdgpu_gart_size;
156extern int amdgpu_gtt_size;
157extern int amdgpu_moverate;
158extern int amdgpu_audio;
159extern int amdgpu_disp_priority;
160extern int amdgpu_hw_i2c;
161extern int amdgpu_pcie_gen2;
162extern int amdgpu_msi;
163extern char amdgpu_lockup_timeout[AMDGPU_MAX_TIMEOUT_PARAM_LENGTH];
164extern int amdgpu_dpm;
165extern int amdgpu_fw_load_type;
166extern int amdgpu_aspm;
167extern int amdgpu_runtime_pm;
168extern uint amdgpu_ip_block_mask;
169extern int amdgpu_bapm;
170extern int amdgpu_deep_color;
171extern int amdgpu_vm_size;
172extern int amdgpu_vm_block_size;
173extern int amdgpu_vm_fragment_size;
174extern int amdgpu_vm_fault_stop;
175extern int amdgpu_vm_debug;
176extern int amdgpu_vm_update_mode;
177extern int amdgpu_exp_hw_support;
178extern int amdgpu_dc;
179extern int amdgpu_sched_jobs;
180extern int amdgpu_sched_hw_submission;
181extern uint amdgpu_pcie_gen_cap;
182extern uint amdgpu_pcie_lane_cap;
183extern u64 amdgpu_cg_mask;
184extern uint amdgpu_pg_mask;
185extern uint amdgpu_sdma_phase_quantum;
186extern char *amdgpu_disable_cu;
187extern char *amdgpu_virtual_display;
188extern uint amdgpu_pp_feature_mask;
189extern uint amdgpu_force_long_training;
190extern int amdgpu_lbpw;
191extern int amdgpu_compute_multipipe;
192extern int amdgpu_gpu_recovery;
193extern int amdgpu_emu_mode;
194extern uint amdgpu_smu_memory_pool_size;
195extern int amdgpu_smu_pptable_id;
196extern uint amdgpu_dc_feature_mask;
197extern uint amdgpu_freesync_vid_mode;
198extern uint amdgpu_dc_debug_mask;
199extern uint amdgpu_dc_visual_confirm;
200extern int amdgpu_dm_abm_level;
201extern int amdgpu_backlight;
202extern int amdgpu_damage_clips;
203extern struct amdgpu_mgpu_info mgpu_info;
204extern int amdgpu_ras_enable;
205extern uint amdgpu_ras_mask;
206extern int amdgpu_bad_page_threshold;
207extern bool amdgpu_ignore_bad_page_threshold;
208extern struct amdgpu_watchdog_timer amdgpu_watchdog_timer;
209extern int amdgpu_async_gfx_ring;
210extern int amdgpu_mcbp;
211extern int amdgpu_discovery;
212extern int amdgpu_mes;
213extern int amdgpu_mes_log_enable;
214extern int amdgpu_mes_kiq;
215extern int amdgpu_noretry;
216extern int amdgpu_force_asic_type;
217extern int amdgpu_smartshift_bias;
218extern int amdgpu_use_xgmi_p2p;
219extern int amdgpu_mtype_local;
220extern bool enforce_isolation;
221#ifdef CONFIG_HSA_AMD
222extern int sched_policy;
223extern bool debug_evictions;
224extern bool no_system_mem_limit;
225extern int halt_if_hws_hang;
226#else
227static const int __maybe_unused sched_policy = KFD_SCHED_POLICY_HWS;
228static const bool __maybe_unused debug_evictions; /* = false */
229static const bool __maybe_unused no_system_mem_limit;
230static const int __maybe_unused halt_if_hws_hang;
231#endif
232#ifdef CONFIG_HSA_AMD_P2P
233extern bool pcie_p2p;
234#endif
235
236extern int amdgpu_tmz;
237extern int amdgpu_reset_method;
238
239#ifdef CONFIG_DRM_AMDGPU_SI
240extern int amdgpu_si_support;
241#endif
242#ifdef CONFIG_DRM_AMDGPU_CIK
243extern int amdgpu_cik_support;
244#endif
245extern int amdgpu_num_kcq;
246
247#define AMDGPU_VCNFW_LOG_SIZE (32 * 1024)
248extern int amdgpu_vcnfw_log;
249extern int amdgpu_sg_display;
250extern int amdgpu_umsch_mm;
251extern int amdgpu_seamless;
252
253extern int amdgpu_user_partt_mode;
254extern int amdgpu_agp;
255
256extern int amdgpu_wbrf;
257
258#define AMDGPU_VM_MAX_NUM_CTX 4096
259#define AMDGPU_SG_THRESHOLD (256*1024*1024)
260#define AMDGPU_WAIT_IDLE_TIMEOUT_IN_MS 3000
261#define AMDGPU_MAX_USEC_TIMEOUT 100000 /* 100 ms */
262#define AMDGPU_FENCE_JIFFIES_TIMEOUT (HZ / 2)
263#define AMDGPU_DEBUGFS_MAX_COMPONENTS 32
264#define AMDGPUFB_CONN_LIMIT 4
265#define AMDGPU_BIOS_NUM_SCRATCH 16
266
267#define AMDGPU_VBIOS_VGA_ALLOCATION (9 * 1024 * 1024) /* reserve 8MB for vga emulator and 1 MB for FB */
268
269/* hard reset data */
270#define AMDGPU_ASIC_RESET_DATA 0x39d5e86b
271
272/* reset flags */
273#define AMDGPU_RESET_GFX (1 << 0)
274#define AMDGPU_RESET_COMPUTE (1 << 1)
275#define AMDGPU_RESET_DMA (1 << 2)
276#define AMDGPU_RESET_CP (1 << 3)
277#define AMDGPU_RESET_GRBM (1 << 4)
278#define AMDGPU_RESET_DMA1 (1 << 5)
279#define AMDGPU_RESET_RLC (1 << 6)
280#define AMDGPU_RESET_SEM (1 << 7)
281#define AMDGPU_RESET_IH (1 << 8)
282#define AMDGPU_RESET_VMC (1 << 9)
283#define AMDGPU_RESET_MC (1 << 10)
284#define AMDGPU_RESET_DISPLAY (1 << 11)
285#define AMDGPU_RESET_UVD (1 << 12)
286#define AMDGPU_RESET_VCE (1 << 13)
287#define AMDGPU_RESET_VCE1 (1 << 14)
288
289/* max cursor sizes (in pixels) */
290#define CIK_CURSOR_WIDTH 128
291#define CIK_CURSOR_HEIGHT 128
292
293/* smart shift bias level limits */
294#define AMDGPU_SMARTSHIFT_MAX_BIAS (100)
295#define AMDGPU_SMARTSHIFT_MIN_BIAS (-100)
296
 297/* Extra time delay (in ms) to eliminate the influence of momentary temperature fluctuations */
298#define AMDGPU_SWCTF_EXTRA_DELAY 50
299
300struct amdgpu_xcp_mgr;
301struct amdgpu_device;
302struct amdgpu_irq_src;
303struct amdgpu_fpriv;
304struct amdgpu_bo_va_mapping;
305struct kfd_vm_fault_info;
306struct amdgpu_hive_info;
307struct amdgpu_reset_context;
308struct amdgpu_reset_control;
309
310enum amdgpu_cp_irq {
311 AMDGPU_CP_IRQ_GFX_ME0_PIPE0_EOP = 0,
312 AMDGPU_CP_IRQ_GFX_ME0_PIPE1_EOP,
313 AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE0_EOP,
314 AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE1_EOP,
315 AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE2_EOP,
316 AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE3_EOP,
317 AMDGPU_CP_IRQ_COMPUTE_MEC2_PIPE0_EOP,
318 AMDGPU_CP_IRQ_COMPUTE_MEC2_PIPE1_EOP,
319 AMDGPU_CP_IRQ_COMPUTE_MEC2_PIPE2_EOP,
320 AMDGPU_CP_IRQ_COMPUTE_MEC2_PIPE3_EOP,
321
322 AMDGPU_CP_IRQ_LAST
323};
324
325enum amdgpu_thermal_irq {
326 AMDGPU_THERMAL_IRQ_LOW_TO_HIGH = 0,
327 AMDGPU_THERMAL_IRQ_HIGH_TO_LOW,
328
329 AMDGPU_THERMAL_IRQ_LAST
330};
331
332enum amdgpu_kiq_irq {
333 AMDGPU_CP_KIQ_IRQ_DRIVER0 = 0,
334 AMDGPU_CP_KIQ_IRQ_LAST
335};
336#define SRIOV_USEC_TIMEOUT 1200000 /* wait 12 * 100ms for SRIOV */
337#define MAX_KIQ_REG_WAIT 5000 /* in usecs, 5ms */
338#define MAX_KIQ_REG_BAILOUT_INTERVAL 5 /* in msecs, 5ms */
339#define MAX_KIQ_REG_TRY 1000
340
341int amdgpu_device_ip_set_clockgating_state(void *dev,
342 enum amd_ip_block_type block_type,
343 enum amd_clockgating_state state);
344int amdgpu_device_ip_set_powergating_state(void *dev,
345 enum amd_ip_block_type block_type,
346 enum amd_powergating_state state);
347void amdgpu_device_ip_get_clockgating_state(struct amdgpu_device *adev,
348 u64 *flags);
349int amdgpu_device_ip_wait_for_idle(struct amdgpu_device *adev,
350 enum amd_ip_block_type block_type);
351bool amdgpu_device_ip_is_idle(struct amdgpu_device *adev,
352 enum amd_ip_block_type block_type);
353
354#define AMDGPU_MAX_IP_NUM 16
355
356struct amdgpu_ip_block_status {
357 bool valid;
358 bool sw;
359 bool hw;
360 bool late_initialized;
361 bool hang;
362};
363
364struct amdgpu_ip_block_version {
365 const enum amd_ip_block_type type;
366 const u32 major;
367 const u32 minor;
368 const u32 rev;
369 const struct amd_ip_funcs *funcs;
370};
371
372struct amdgpu_ip_block {
373 struct amdgpu_ip_block_status status;
374 const struct amdgpu_ip_block_version *version;
375};
376
377int amdgpu_device_ip_block_version_cmp(struct amdgpu_device *adev,
378 enum amd_ip_block_type type,
379 u32 major, u32 minor);
380
381struct amdgpu_ip_block *
382amdgpu_device_ip_get_ip_block(struct amdgpu_device *adev,
383 enum amd_ip_block_type type);
384
385int amdgpu_device_ip_block_add(struct amdgpu_device *adev,
386 const struct amdgpu_ip_block_version *ip_block_version);
387
388/*
389 * BIOS.
390 */
391bool amdgpu_get_bios(struct amdgpu_device *adev);
392bool amdgpu_read_bios(struct amdgpu_device *adev);
393bool amdgpu_soc15_read_bios_from_rom(struct amdgpu_device *adev,
394 u8 *bios, u32 length_bytes);
395/*
396 * Clocks
397 */
398
399#define AMDGPU_MAX_PPLL 3
400
401struct amdgpu_clock {
402 struct amdgpu_pll ppll[AMDGPU_MAX_PPLL];
403 struct amdgpu_pll spll;
404 struct amdgpu_pll mpll;
405 /* 10 Khz units */
406 uint32_t default_mclk;
407 uint32_t default_sclk;
408 uint32_t default_dispclk;
409 uint32_t current_dispclk;
410 uint32_t dp_extclk;
411 uint32_t max_pixel_clock;
412};
413
 414/* Sub-allocation manager; it has to be protected by another lock.
 415 * By design this is a helper for other parts of the driver, like
 416 * the indirect buffer or semaphore, which both have their own
 417 * locking.
 418 *
 419 * The principle is simple: we keep a list of sub-allocations in
 420 * offset order (the first entry has offset == 0, the last entry
 421 * has the highest offset).
 422 *
 423 * When allocating a new object we first check whether there is room
 424 * at the end, i.e. total_size - (last_object_offset + last_object_size)
 425 * >= alloc_size. If so, we allocate the new object there.
 426 *
 427 * When there is not enough room at the end, we wait on the sub objects
 428 * from the start of the buffer until object_offset + object_size >=
 429 * alloc_size; the freed range is then returned for the new allocation.
 430 *
 431 * Alignment can't be bigger than the page size.
 432 *
 433 * Holes are not considered for allocation, to keep things simple.
 434 * The assumption is that there won't be holes (all objects use the
 435 * same alignment).
 436 */
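/*
 * A minimal sketch (not driver code) of the end-of-buffer check described
 * above; last_offset/last_size are hypothetical stand-ins for the state
 * tracked by struct drm_suballoc_manager:
 *
 *	static inline bool sa_fits_at_end(u64 total_size, u64 last_offset,
 *					  u64 last_size, u64 alloc_size)
 *	{
 *		return total_size - (last_offset + last_size) >= alloc_size;
 *	}
 *
 * When this check fails, the allocator waits on sub-allocations from the
 * start of the buffer until enough contiguous space has been freed there.
 */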
437
438struct amdgpu_sa_manager {
439 struct drm_suballoc_manager base;
440 struct amdgpu_bo *bo;
441 uint64_t gpu_addr;
442 void *cpu_ptr;
443};
444
445int amdgpu_fence_slab_init(void);
446void amdgpu_fence_slab_fini(void);
447
448/*
449 * IRQS.
450 */
451
452struct amdgpu_flip_work {
453 struct delayed_work flip_work;
454 struct work_struct unpin_work;
455 struct amdgpu_device *adev;
456 int crtc_id;
457 u32 target_vblank;
458 uint64_t base;
459 struct drm_pending_vblank_event *event;
460 struct amdgpu_bo *old_abo;
461 unsigned shared_count;
462 struct dma_fence **shared;
463 struct dma_fence_cb cb;
464 bool async;
465};
466
467
468/*
469 * file private structure
470 */
471
472struct amdgpu_fpriv {
473 struct amdgpu_vm vm;
474 struct amdgpu_bo_va *prt_va;
475 struct amdgpu_bo_va *csa_va;
476 struct amdgpu_bo_va *seq64_va;
477 struct mutex bo_list_lock;
478 struct idr bo_list_handles;
479 struct amdgpu_ctx_mgr ctx_mgr;
480 /** GPU partition selection */
481 uint32_t xcp_id;
482};
483
484int amdgpu_file_to_fpriv(struct file *filp, struct amdgpu_fpriv **fpriv);
485
486/*
487 * Writeback
488 */
489#define AMDGPU_MAX_WB 1024 /* Reserve at most 1024 WB slots for amdgpu-owned rings. */
490
491struct amdgpu_wb {
492 struct amdgpu_bo *wb_obj;
493 volatile uint32_t *wb;
494 uint64_t gpu_addr;
495 u32 num_wb; /* Number of wb slots actually reserved for amdgpu. */
496 unsigned long used[DIV_ROUND_UP(AMDGPU_MAX_WB, BITS_PER_LONG)];
497};
498
499int amdgpu_device_wb_get(struct amdgpu_device *adev, u32 *wb);
500void amdgpu_device_wb_free(struct amdgpu_device *adev, u32 wb);
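/*
 * Usage sketch (error handling elided; exact slot layout may differ per IP):
 * amdgpu_device_wb_get() returns 0 on success and stores a dword offset into
 * the writeback buffer, so the CPU view of the slot is adev->wb.wb[index] and
 * its GPU address is adev->wb.gpu_addr + index * 4.
 *
 *	u32 index;
 *
 *	if (!amdgpu_device_wb_get(adev, &index)) {
 *		u64 wb_gpu_addr = adev->wb.gpu_addr + index * 4;
 *		u32 value = adev->wb.wb[index];
 *
 *		amdgpu_device_wb_free(adev, index);
 *	}
 */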
501
502/*
503 * Benchmarking
504 */
505int amdgpu_benchmark(struct amdgpu_device *adev, int test_number);
506
507/*
508 * ASIC specific register table accessible by UMD
509 */
510struct amdgpu_allowed_register_entry {
511 uint32_t reg_offset;
512 bool grbm_indexed;
513};
514
515/**
516 * enum amd_reset_method - Methods for resetting AMD GPU devices
517 *
518 * @AMD_RESET_METHOD_NONE: The device will not be reset.
 519 * @AMD_RESET_METHOD_LEGACY: Method reserved for SI, CIK and VI ASICs.
 520 * @AMD_RESET_METHOD_MODE0: Reset the entire ASIC. Not currently available
 521 *                          for any device.
 522 * @AMD_RESET_METHOD_MODE1: Resets all IP blocks on the ASIC (SDMA, GFX,
 523 *                          VCN, etc.) individually. Suitable only for some
 524 *                          discrete GPUs; not available for all ASICs.
 525 * @AMD_RESET_METHOD_MODE2: Resets a lesser level of IPs than MODE1. Which
 526 *                          IPs are reset depends on the ASIC. Notably does
 527 *                          not reset IPs shared with the CPU on APUs or the
 528 *                          memory controllers, so VRAM is not lost. Not
 529 *                          available on all ASICs.
 530 * @AMD_RESET_METHOD_BACO: BACO (Bus Alive, Chip Off) powers the card off
 531 *                         and on without powering off the PCI bus. Suitable
 532 *                         only for discrete GPUs.
 533 * @AMD_RESET_METHOD_PCI: Does a full bus reset via the core Linux PCI reset
 534 *                        subsystem, performing a secondary bus reset or FLR
 535 *                        depending on what the underlying hardware supports.
 536 *
 537 * Methods available to the AMD GPU driver for resetting the device. Not all
 538 * methods are suitable for every device. The user can override the method
 539 * using the `reset_method` module parameter.
539 */
540enum amd_reset_method {
541 AMD_RESET_METHOD_NONE = -1,
542 AMD_RESET_METHOD_LEGACY = 0,
543 AMD_RESET_METHOD_MODE0,
544 AMD_RESET_METHOD_MODE1,
545 AMD_RESET_METHOD_MODE2,
546 AMD_RESET_METHOD_BACO,
547 AMD_RESET_METHOD_PCI,
548};
549
550struct amdgpu_video_codec_info {
551 u32 codec_type;
552 u32 max_width;
553 u32 max_height;
554 u32 max_pixels_per_frame;
555 u32 max_level;
556};
557
558#define codec_info_build(type, width, height, level) \
559 .codec_type = type,\
560 .max_width = width,\
561 .max_height = height,\
562 .max_pixels_per_frame = height * width,\
563 .max_level = level,
564
565struct amdgpu_video_codecs {
566 const u32 codec_count;
567 const struct amdgpu_video_codec_info *codec_array;
568};
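/*
 * Illustrative use of codec_info_build() (values shown are examples, not a
 * statement of any ASIC's real capabilities): the macro expands to designated
 * initializers, so a capability table entry is written as
 *
 *	static const struct amdgpu_video_codec_info decode_array[] = {
 *		{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC,
 *				  8192, 4352, 186)},
 *	};
 *
 * and exposed via a struct amdgpu_video_codecs with
 * codec_count = ARRAY_SIZE(decode_array).
 */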
569
570/*
571 * ASIC specific functions.
572 */
573struct amdgpu_asic_funcs {
574 bool (*read_disabled_bios)(struct amdgpu_device *adev);
575 bool (*read_bios_from_rom)(struct amdgpu_device *adev,
576 u8 *bios, u32 length_bytes);
577 int (*read_register)(struct amdgpu_device *adev, u32 se_num,
578 u32 sh_num, u32 reg_offset, u32 *value);
579 void (*set_vga_state)(struct amdgpu_device *adev, bool state);
580 int (*reset)(struct amdgpu_device *adev);
581 enum amd_reset_method (*reset_method)(struct amdgpu_device *adev);
582 /* get the reference clock */
583 u32 (*get_xclk)(struct amdgpu_device *adev);
584 /* MM block clocks */
585 int (*set_uvd_clocks)(struct amdgpu_device *adev, u32 vclk, u32 dclk);
586 int (*set_vce_clocks)(struct amdgpu_device *adev, u32 evclk, u32 ecclk);
587 /* static power management */
588 int (*get_pcie_lanes)(struct amdgpu_device *adev);
589 void (*set_pcie_lanes)(struct amdgpu_device *adev, int lanes);
590 /* get config memsize register */
591 u32 (*get_config_memsize)(struct amdgpu_device *adev);
592 /* flush hdp write queue */
593 void (*flush_hdp)(struct amdgpu_device *adev, struct amdgpu_ring *ring);
594 /* invalidate hdp read cache */
595 void (*invalidate_hdp)(struct amdgpu_device *adev,
596 struct amdgpu_ring *ring);
 597	/* check if the asic needs a full reset or if soft reset will work */
598 bool (*need_full_reset)(struct amdgpu_device *adev);
 599	/* initialize doorbell layout for a specific asic */
600 void (*init_doorbell_index)(struct amdgpu_device *adev);
601 /* PCIe bandwidth usage */
602 void (*get_pcie_usage)(struct amdgpu_device *adev, uint64_t *count0,
603 uint64_t *count1);
604 /* do we need to reset the asic at init time (e.g., kexec) */
605 bool (*need_reset_on_init)(struct amdgpu_device *adev);
606 /* PCIe replay counter */
607 uint64_t (*get_pcie_replay_count)(struct amdgpu_device *adev);
608 /* device supports BACO */
609 bool (*supports_baco)(struct amdgpu_device *adev);
610 /* pre asic_init quirks */
611 void (*pre_asic_init)(struct amdgpu_device *adev);
612 /* enter/exit umd stable pstate */
613 int (*update_umd_stable_pstate)(struct amdgpu_device *adev, bool enter);
614 /* query video codecs */
615 int (*query_video_codecs)(struct amdgpu_device *adev, bool encode,
616 const struct amdgpu_video_codecs **codecs);
617 /* encode "> 32bits" smn addressing */
618 u64 (*encode_ext_smn_addressing)(int ext_id);
619
620 ssize_t (*get_reg_state)(struct amdgpu_device *adev,
621 enum amdgpu_reg_state reg_state, void *buf,
622 size_t max_size);
623};
624
625/*
626 * IOCTL.
627 */
628int amdgpu_bo_list_ioctl(struct drm_device *dev, void *data,
629 struct drm_file *filp);
630
631int amdgpu_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp);
632int amdgpu_cs_fence_to_handle_ioctl(struct drm_device *dev, void *data,
633 struct drm_file *filp);
634int amdgpu_cs_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *filp);
635int amdgpu_cs_wait_fences_ioctl(struct drm_device *dev, void *data,
636 struct drm_file *filp);
637
638/* VRAM scratch page for HDP bug, default vram page */
639struct amdgpu_mem_scratch {
640 struct amdgpu_bo *robj;
641 volatile uint32_t *ptr;
642 u64 gpu_addr;
643};
644
645/*
646 * CGS
647 */
648struct cgs_device *amdgpu_cgs_create_device(struct amdgpu_device *adev);
649void amdgpu_cgs_destroy_device(struct cgs_device *cgs_device);
650
651/*
652 * Core structure, functions and helpers.
653 */
654typedef uint32_t (*amdgpu_rreg_t)(struct amdgpu_device*, uint32_t);
655typedef void (*amdgpu_wreg_t)(struct amdgpu_device*, uint32_t, uint32_t);
656
657typedef uint32_t (*amdgpu_rreg_ext_t)(struct amdgpu_device*, uint64_t);
658typedef void (*amdgpu_wreg_ext_t)(struct amdgpu_device*, uint64_t, uint32_t);
659
660typedef uint64_t (*amdgpu_rreg64_t)(struct amdgpu_device*, uint32_t);
661typedef void (*amdgpu_wreg64_t)(struct amdgpu_device*, uint32_t, uint64_t);
662
663typedef uint64_t (*amdgpu_rreg64_ext_t)(struct amdgpu_device*, uint64_t);
664typedef void (*amdgpu_wreg64_ext_t)(struct amdgpu_device*, uint64_t, uint64_t);
665
666typedef uint32_t (*amdgpu_block_rreg_t)(struct amdgpu_device*, uint32_t, uint32_t);
667typedef void (*amdgpu_block_wreg_t)(struct amdgpu_device*, uint32_t, uint32_t, uint32_t);
668
669struct amdgpu_mmio_remap {
670 u32 reg_offset;
671 resource_size_t bus_addr;
672};
673
 674/* Define the HW IP blocks that will be used in the driver; add more if necessary */
675enum amd_hw_ip_block_type {
676 GC_HWIP = 1,
677 HDP_HWIP,
678 SDMA0_HWIP,
679 SDMA1_HWIP,
680 SDMA2_HWIP,
681 SDMA3_HWIP,
682 SDMA4_HWIP,
683 SDMA5_HWIP,
684 SDMA6_HWIP,
685 SDMA7_HWIP,
686 LSDMA_HWIP,
687 MMHUB_HWIP,
688 ATHUB_HWIP,
689 NBIO_HWIP,
690 MP0_HWIP,
691 MP1_HWIP,
692 UVD_HWIP,
693 VCN_HWIP = UVD_HWIP,
694 JPEG_HWIP = VCN_HWIP,
695 VCN1_HWIP,
696 VCE_HWIP,
697 VPE_HWIP,
698 DF_HWIP,
699 DCE_HWIP,
700 OSSSYS_HWIP,
701 SMUIO_HWIP,
702 PWR_HWIP,
703 NBIF_HWIP,
704 THM_HWIP,
705 CLK_HWIP,
706 UMC_HWIP,
707 RSMU_HWIP,
708 XGMI_HWIP,
709 DCI_HWIP,
710 PCIE_HWIP,
711 MAX_HWIP
712};
713
714#define HWIP_MAX_INSTANCE 44
715
716#define HW_ID_MAX 300
717#define IP_VERSION_FULL(mj, mn, rv, var, srev) \
718 (((mj) << 24) | ((mn) << 16) | ((rv) << 8) | ((var) << 4) | (srev))
719#define IP_VERSION(mj, mn, rv) IP_VERSION_FULL(mj, mn, rv, 0, 0)
720#define IP_VERSION_MAJ(ver) ((ver) >> 24)
721#define IP_VERSION_MIN(ver) (((ver) >> 16) & 0xFF)
722#define IP_VERSION_REV(ver) (((ver) >> 8) & 0xFF)
723#define IP_VERSION_VARIANT(ver) (((ver) >> 4) & 0xF)
724#define IP_VERSION_SUBREV(ver) ((ver) & 0xF)
725#define IP_VERSION_MAJ_MIN_REV(ver) ((ver) >> 8)
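/*
 * Worked example: IP_VERSION(11, 0, 3) packs to
 * (11 << 24) | (0 << 16) | (3 << 8) = 0x0b000300, so IP_VERSION_MAJ()
 * recovers 11, IP_VERSION_MIN() 0 and IP_VERSION_REV() 3. Because higher
 * order fields occupy higher bits, packed versions compare numerically
 * (setup_gfx_v11() below is a hypothetical callee):
 *
 *	if (amdgpu_ip_version(adev, GC_HWIP, 0) >= IP_VERSION(11, 0, 0))
 *		setup_gfx_v11(adev);
 */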
726
727struct amdgpu_ip_map_info {
728 /* Map of logical to actual dev instances/mask */
729 uint32_t dev_inst[MAX_HWIP][HWIP_MAX_INSTANCE];
730 int8_t (*logical_to_dev_inst)(struct amdgpu_device *adev,
731 enum amd_hw_ip_block_type block,
732 int8_t inst);
733 uint32_t (*logical_to_dev_mask)(struct amdgpu_device *adev,
734 enum amd_hw_ip_block_type block,
735 uint32_t mask);
736};
737
738struct amd_powerplay {
739 void *pp_handle;
740 const struct amd_pm_funcs *pp_funcs;
741};
742
743struct ip_discovery_top;
744
745/* polaris10 kickers */
746#define ASICID_IS_P20(did, rid) (((did == 0x67DF) && \
747 ((rid == 0xE3) || \
748 (rid == 0xE4) || \
749 (rid == 0xE5) || \
750 (rid == 0xE7) || \
751 (rid == 0xEF))) || \
752 ((did == 0x6FDF) && \
753 ((rid == 0xE7) || \
754 (rid == 0xEF) || \
755 (rid == 0xFF))))
756
757#define ASICID_IS_P30(did, rid) ((did == 0x67DF) && \
758 ((rid == 0xE1) || \
759 (rid == 0xF7)))
760
761/* polaris11 kickers */
762#define ASICID_IS_P21(did, rid) (((did == 0x67EF) && \
763 ((rid == 0xE0) || \
764 (rid == 0xE5))) || \
765 ((did == 0x67FF) && \
766 ((rid == 0xCF) || \
767 (rid == 0xEF) || \
768 (rid == 0xFF))))
769
770#define ASICID_IS_P31(did, rid) ((did == 0x67EF) && \
771 ((rid == 0xE2)))
772
773/* polaris12 kickers */
774#define ASICID_IS_P23(did, rid) (((did == 0x6987) && \
775 ((rid == 0xC0) || \
776 (rid == 0xC1) || \
777 (rid == 0xC3) || \
778 (rid == 0xC7))) || \
779 ((did == 0x6981) && \
780 ((rid == 0x00) || \
781 (rid == 0x01) || \
782 (rid == 0x10))))
783
784struct amdgpu_mqd_prop {
785 uint64_t mqd_gpu_addr;
786 uint64_t hqd_base_gpu_addr;
787 uint64_t rptr_gpu_addr;
788 uint64_t wptr_gpu_addr;
789 uint32_t queue_size;
790 bool use_doorbell;
791 uint32_t doorbell_index;
792 uint64_t eop_gpu_addr;
793 uint32_t hqd_pipe_priority;
794 uint32_t hqd_queue_priority;
795 bool allow_tunneling;
796 bool hqd_active;
797};
798
799struct amdgpu_mqd {
800 unsigned mqd_size;
801 int (*init_mqd)(struct amdgpu_device *adev, void *mqd,
802 struct amdgpu_mqd_prop *p);
803};
804
805#define AMDGPU_RESET_MAGIC_NUM 64
806#define AMDGPU_MAX_DF_PERFMONS 4
807struct amdgpu_reset_domain;
808struct amdgpu_fru_info;
809
810struct amdgpu_reset_info {
811 /* reset dump register */
812 u32 *reset_dump_reg_list;
813 u32 *reset_dump_reg_value;
814 int num_regs;
815
816#ifdef CONFIG_DEV_COREDUMP
817 struct amdgpu_coredump_info *coredump_info;
818#endif
819};
820
821/*
822 * Non-zero (true) if the GPU has VRAM. Zero (false) otherwise.
823 */
824#define AMDGPU_HAS_VRAM(_adev) ((_adev)->gmc.real_vram_size)
825
826struct amdgpu_device {
827 struct device *dev;
828 struct pci_dev *pdev;
829 struct drm_device ddev;
830
831#ifdef CONFIG_DRM_AMD_ACP
832 struct amdgpu_acp acp;
833#endif
834 struct amdgpu_hive_info *hive;
835 struct amdgpu_xcp_mgr *xcp_mgr;
836 /* ASIC */
837 enum amd_asic_type asic_type;
838 uint32_t family;
839 uint32_t rev_id;
840 uint32_t external_rev_id;
841 unsigned long flags;
842 unsigned long apu_flags;
843 int usec_timeout;
844 const struct amdgpu_asic_funcs *asic_funcs;
845 bool shutdown;
846 bool need_swiotlb;
847 bool accel_working;
848 struct notifier_block acpi_nb;
849 struct amdgpu_i2c_chan *i2c_bus[AMDGPU_MAX_I2C_BUS];
850 struct debugfs_blob_wrapper debugfs_vbios_blob;
851 struct debugfs_blob_wrapper debugfs_discovery_blob;
852 struct mutex srbm_mutex;
853 /* GRBM index mutex. Protects concurrent access to GRBM index */
854 struct mutex grbm_idx_mutex;
855 struct dev_pm_domain vga_pm_domain;
856 bool have_disp_power_ref;
857 bool have_atomics_support;
858
859 /* BIOS */
860 bool is_atom_fw;
861 uint8_t *bios;
862 uint32_t bios_size;
863 uint32_t bios_scratch_reg_offset;
864 uint32_t bios_scratch[AMDGPU_BIOS_NUM_SCRATCH];
865
866 /* Register/doorbell mmio */
867 resource_size_t rmmio_base;
868 resource_size_t rmmio_size;
869 void __iomem *rmmio;
870 /* protects concurrent MM_INDEX/DATA based register access */
871 spinlock_t mmio_idx_lock;
872 struct amdgpu_mmio_remap rmmio_remap;
873 /* protects concurrent SMC based register access */
874 spinlock_t smc_idx_lock;
875 amdgpu_rreg_t smc_rreg;
876 amdgpu_wreg_t smc_wreg;
877 /* protects concurrent PCIE register access */
878 spinlock_t pcie_idx_lock;
879 amdgpu_rreg_t pcie_rreg;
880 amdgpu_wreg_t pcie_wreg;
881 amdgpu_rreg_t pciep_rreg;
882 amdgpu_wreg_t pciep_wreg;
883 amdgpu_rreg_ext_t pcie_rreg_ext;
884 amdgpu_wreg_ext_t pcie_wreg_ext;
885 amdgpu_rreg64_t pcie_rreg64;
886 amdgpu_wreg64_t pcie_wreg64;
887 amdgpu_rreg64_ext_t pcie_rreg64_ext;
888 amdgpu_wreg64_ext_t pcie_wreg64_ext;
889 /* protects concurrent UVD register access */
890 spinlock_t uvd_ctx_idx_lock;
891 amdgpu_rreg_t uvd_ctx_rreg;
892 amdgpu_wreg_t uvd_ctx_wreg;
893 /* protects concurrent DIDT register access */
894 spinlock_t didt_idx_lock;
895 amdgpu_rreg_t didt_rreg;
896 amdgpu_wreg_t didt_wreg;
897 /* protects concurrent gc_cac register access */
898 spinlock_t gc_cac_idx_lock;
899 amdgpu_rreg_t gc_cac_rreg;
900 amdgpu_wreg_t gc_cac_wreg;
901 /* protects concurrent se_cac register access */
902 spinlock_t se_cac_idx_lock;
903 amdgpu_rreg_t se_cac_rreg;
904 amdgpu_wreg_t se_cac_wreg;
905 /* protects concurrent ENDPOINT (audio) register access */
906 spinlock_t audio_endpt_idx_lock;
907 amdgpu_block_rreg_t audio_endpt_rreg;
908 amdgpu_block_wreg_t audio_endpt_wreg;
909 struct amdgpu_doorbell doorbell;
910
911 /* clock/pll info */
912 struct amdgpu_clock clock;
913
914 /* MC */
915 struct amdgpu_gmc gmc;
916 struct amdgpu_gart gart;
917 dma_addr_t dummy_page_addr;
918 struct amdgpu_vm_manager vm_manager;
919 struct amdgpu_vmhub vmhub[AMDGPU_MAX_VMHUBS];
920 DECLARE_BITMAP(vmhubs_mask, AMDGPU_MAX_VMHUBS);
921
922 /* memory management */
923 struct amdgpu_mman mman;
924 struct amdgpu_mem_scratch mem_scratch;
925 struct amdgpu_wb wb;
926 atomic64_t num_bytes_moved;
927 atomic64_t num_evictions;
928 atomic64_t num_vram_cpu_page_faults;
929 atomic_t gpu_reset_counter;
930 atomic_t vram_lost_counter;
931
932 /* data for buffer migration throttling */
933 struct {
934 spinlock_t lock;
935 s64 last_update_us;
936 s64 accum_us; /* accumulated microseconds */
937 s64 accum_us_vis; /* for visible VRAM */
938 u32 log2_max_MBps;
939 } mm_stats;
940
941 /* display */
942 bool enable_virtual_display;
943 struct amdgpu_vkms_output *amdgpu_vkms_output;
944 struct amdgpu_mode_info mode_info;
945 /* For pre-DCE11. DCE11 and later are in "struct amdgpu_device->dm" */
946 struct delayed_work hotplug_work;
947 struct amdgpu_irq_src crtc_irq;
948 struct amdgpu_irq_src vline0_irq;
949 struct amdgpu_irq_src vupdate_irq;
950 struct amdgpu_irq_src pageflip_irq;
951 struct amdgpu_irq_src hpd_irq;
952 struct amdgpu_irq_src dmub_trace_irq;
953 struct amdgpu_irq_src dmub_outbox_irq;
954
955 /* rings */
956 u64 fence_context;
957 unsigned num_rings;
958 struct amdgpu_ring *rings[AMDGPU_MAX_RINGS];
959 struct dma_fence __rcu *gang_submit;
960 bool ib_pool_ready;
961 struct amdgpu_sa_manager ib_pools[AMDGPU_IB_POOL_MAX];
962 struct amdgpu_sched gpu_sched[AMDGPU_HW_IP_NUM][AMDGPU_RING_PRIO_MAX];
963
964 /* interrupts */
965 struct amdgpu_irq irq;
966
967 /* powerplay */
968 struct amd_powerplay powerplay;
969 struct amdgpu_pm pm;
970 u64 cg_flags;
971 u32 pg_flags;
972
973 /* nbio */
974 struct amdgpu_nbio nbio;
975
976 /* hdp */
977 struct amdgpu_hdp hdp;
978
979 /* smuio */
980 struct amdgpu_smuio smuio;
981
982 /* mmhub */
983 struct amdgpu_mmhub mmhub;
984
985 /* gfxhub */
986 struct amdgpu_gfxhub gfxhub;
987
988 /* gfx */
989 struct amdgpu_gfx gfx;
990
991 /* sdma */
992 struct amdgpu_sdma sdma;
993
994 /* lsdma */
995 struct amdgpu_lsdma lsdma;
996
997 /* uvd */
998 struct amdgpu_uvd uvd;
999
1000 /* vce */
1001 struct amdgpu_vce vce;
1002
1003 /* vcn */
1004 struct amdgpu_vcn vcn;
1005
1006 /* jpeg */
1007 struct amdgpu_jpeg jpeg;
1008
1009 /* vpe */
1010 struct amdgpu_vpe vpe;
1011
1012 /* umsch */
1013 struct amdgpu_umsch_mm umsch_mm;
1014 bool enable_umsch_mm;
1015
1016 /* firmwares */
1017 struct amdgpu_firmware firmware;
1018
1019 /* PSP */
1020 struct psp_context psp;
1021
1022 /* GDS */
1023 struct amdgpu_gds gds;
1024
1025 /* for userq and VM fences */
1026 struct amdgpu_seq64 seq64;
1027
1028 /* KFD */
1029 struct amdgpu_kfd_dev kfd;
1030
1031 /* UMC */
1032 struct amdgpu_umc umc;
1033
1034 /* display related functionality */
1035 struct amdgpu_display_manager dm;
1036
1037 /* mes */
1038 bool enable_mes;
1039 bool enable_mes_kiq;
1040 struct amdgpu_mes mes;
1041 struct amdgpu_mqd mqds[AMDGPU_HW_IP_NUM];
1042
1043 /* df */
1044 struct amdgpu_df df;
1045
1046 /* MCA */
1047 struct amdgpu_mca mca;
1048
1049 /* ACA */
1050 struct amdgpu_aca aca;
1051
1052 struct amdgpu_ip_block ip_blocks[AMDGPU_MAX_IP_NUM];
1053 uint32_t harvest_ip_mask;
1054 int num_ip_blocks;
1055 struct mutex mn_lock;
1056 DECLARE_HASHTABLE(mn_hash, 7);
1057
1058 /* tracking pinned memory */
1059 atomic64_t vram_pin_size;
1060 atomic64_t visible_pin_size;
1061 atomic64_t gart_pin_size;
1062
1063 /* soc15 register offset based on ip, instance and segment */
1064 uint32_t *reg_offset[MAX_HWIP][HWIP_MAX_INSTANCE];
1065 struct amdgpu_ip_map_info ip_map;
1066
1067 /* delayed work_func for deferring clockgating during resume */
1068 struct delayed_work delayed_init_work;
1069
1070 struct amdgpu_virt virt;
1071
1072 /* link all shadow bo */
1073 struct list_head shadow_list;
1074 struct mutex shadow_list_lock;
1075
1076 /* record hw reset is performed */
1077 bool has_hw_reset;
1078 u8 reset_magic[AMDGPU_RESET_MAGIC_NUM];
1079
1080 /* s3/s4 mask */
1081 bool in_suspend;
1082 bool in_s3;
1083 bool in_s4;
1084 bool in_s0ix;
1085 /* indicate amdgpu suspension status */
1086 bool suspend_complete;
1087
1088 enum pp_mp1_state mp1_state;
1089 struct amdgpu_doorbell_index doorbell_index;
1090
1091 struct mutex notifier_lock;
1092
1093 int asic_reset_res;
1094 struct work_struct xgmi_reset_work;
1095 struct list_head reset_list;
1096
1097 long gfx_timeout;
1098 long sdma_timeout;
1099 long video_timeout;
1100 long compute_timeout;
1101 long psp_timeout;
1102
1103 uint64_t unique_id;
1104 uint64_t df_perfmon_config_assign_mask[AMDGPU_MAX_DF_PERFMONS];
1105
1106 /* enable runtime pm on the device */
1107 bool in_runpm;
1108 bool has_pr3;
1109
1110 bool ucode_sysfs_en;
1111
1112 struct amdgpu_fru_info *fru_info;
1113 atomic_t throttling_logging_enabled;
1114 struct ratelimit_state throttling_logging_rs;
1115 uint32_t ras_hw_enabled;
1116 uint32_t ras_enabled;
1117
1118 bool no_hw_access;
1119 struct pci_saved_state *pci_state;
1120 pci_channel_state_t pci_channel_state;
1121
1122 /* Track auto wait count on s_barrier settings */
1123 bool barrier_has_auto_waitcnt;
1124
1125 struct amdgpu_reset_control *reset_cntl;
1126 uint32_t ip_versions[MAX_HWIP][HWIP_MAX_INSTANCE];
1127
1128 bool ram_is_direct_mapped;
1129
1130 struct list_head ras_list;
1131
1132 struct ip_discovery_top *ip_top;
1133
1134 struct amdgpu_reset_domain *reset_domain;
1135
1136 struct mutex benchmark_mutex;
1137
1138 struct amdgpu_reset_info reset_info;
1139
1140 bool scpm_enabled;
1141 uint32_t scpm_status;
1142
1143 struct work_struct reset_work;
1144
1145 bool job_hang;
1146 bool dc_enabled;
1147 /* Mask of active clusters */
1148 uint32_t aid_mask;
1149
1150 /* Debug */
1151 bool debug_vm;
1152 bool debug_largebar;
1153 bool debug_disable_soft_recovery;
1154 bool debug_use_vram_fw_buf;
1155};
1156
1157static inline uint32_t amdgpu_ip_version(const struct amdgpu_device *adev,
1158 uint8_t ip, uint8_t inst)
1159{
1160 /* This considers only major/minor/rev and ignores
1161 * subrevision/variant fields.
1162 */
1163 return adev->ip_versions[ip][inst] & ~0xFFU;
1164}
1165
1166static inline uint32_t amdgpu_ip_version_full(const struct amdgpu_device *adev,
1167 uint8_t ip, uint8_t inst)
1168{
1169 /* This returns full version - major/minor/rev/variant/subrevision */
1170 return adev->ip_versions[ip][inst];
1171}
1172
1173static inline struct amdgpu_device *drm_to_adev(struct drm_device *ddev)
1174{
1175 return container_of(ddev, struct amdgpu_device, ddev);
1176}
1177
1178static inline struct drm_device *adev_to_drm(struct amdgpu_device *adev)
1179{
1180 return &adev->ddev;
1181}
1182
1183static inline struct amdgpu_device *amdgpu_ttm_adev(struct ttm_device *bdev)
1184{
1185 return container_of(bdev, struct amdgpu_device, mman.bdev);
1186}
1187
1188int amdgpu_device_init(struct amdgpu_device *adev,
1189 uint32_t flags);
1190void amdgpu_device_fini_hw(struct amdgpu_device *adev);
1191void amdgpu_device_fini_sw(struct amdgpu_device *adev);
1192
1193int amdgpu_gpu_wait_for_idle(struct amdgpu_device *adev);
1194
1195void amdgpu_device_mm_access(struct amdgpu_device *adev, loff_t pos,
1196 void *buf, size_t size, bool write);
1197size_t amdgpu_device_aper_access(struct amdgpu_device *adev, loff_t pos,
1198 void *buf, size_t size, bool write);
1199
1200void amdgpu_device_vram_access(struct amdgpu_device *adev, loff_t pos,
1201 void *buf, size_t size, bool write);
1202uint32_t amdgpu_device_wait_on_rreg(struct amdgpu_device *adev,
1203 uint32_t inst, uint32_t reg_addr, char reg_name[],
1204 uint32_t expected_value, uint32_t mask);
1205uint32_t amdgpu_device_rreg(struct amdgpu_device *adev,
1206 uint32_t reg, uint32_t acc_flags);
1207u32 amdgpu_device_indirect_rreg_ext(struct amdgpu_device *adev,
1208 u64 reg_addr);
1209uint32_t amdgpu_device_xcc_rreg(struct amdgpu_device *adev,
1210 uint32_t reg, uint32_t acc_flags,
1211 uint32_t xcc_id);
1212void amdgpu_device_wreg(struct amdgpu_device *adev,
1213 uint32_t reg, uint32_t v,
1214 uint32_t acc_flags);
1215void amdgpu_device_indirect_wreg_ext(struct amdgpu_device *adev,
1216 u64 reg_addr, u32 reg_data);
1217void amdgpu_device_xcc_wreg(struct amdgpu_device *adev,
1218 uint32_t reg, uint32_t v,
1219 uint32_t acc_flags,
1220 uint32_t xcc_id);
1221void amdgpu_mm_wreg_mmio_rlc(struct amdgpu_device *adev,
1222 uint32_t reg, uint32_t v, uint32_t xcc_id);
1223void amdgpu_mm_wreg8(struct amdgpu_device *adev, uint32_t offset, uint8_t value);
1224uint8_t amdgpu_mm_rreg8(struct amdgpu_device *adev, uint32_t offset);
1225
1226u32 amdgpu_device_indirect_rreg(struct amdgpu_device *adev,
1227 u32 reg_addr);
1228u64 amdgpu_device_indirect_rreg64(struct amdgpu_device *adev,
1229 u32 reg_addr);
1230u64 amdgpu_device_indirect_rreg64_ext(struct amdgpu_device *adev,
1231 u64 reg_addr);
1232void amdgpu_device_indirect_wreg(struct amdgpu_device *adev,
1233 u32 reg_addr, u32 reg_data);
1234void amdgpu_device_indirect_wreg64(struct amdgpu_device *adev,
1235 u32 reg_addr, u64 reg_data);
1236void amdgpu_device_indirect_wreg64_ext(struct amdgpu_device *adev,
1237 u64 reg_addr, u64 reg_data);
1238u32 amdgpu_device_get_rev_id(struct amdgpu_device *adev);
1239bool amdgpu_device_asic_has_dc_support(enum amd_asic_type asic_type);
1240bool amdgpu_device_has_dc_support(struct amdgpu_device *adev);
1241
1242void amdgpu_device_set_sriov_virtual_display(struct amdgpu_device *adev);
1243
1244int amdgpu_device_pre_asic_reset(struct amdgpu_device *adev,
1245 struct amdgpu_reset_context *reset_context);
1246
1247int amdgpu_do_asic_reset(struct list_head *device_list_handle,
1248 struct amdgpu_reset_context *reset_context);
1249
1250int emu_soc_asic_init(struct amdgpu_device *adev);
1251
1252/*
1253 * Registers read & write functions.
1254 */
1255#define AMDGPU_REGS_NO_KIQ (1<<1)
1256#define AMDGPU_REGS_RLC (1<<2)
1257
1258#define RREG32_NO_KIQ(reg) amdgpu_device_rreg(adev, (reg), AMDGPU_REGS_NO_KIQ)
1259#define WREG32_NO_KIQ(reg, v) amdgpu_device_wreg(adev, (reg), (v), AMDGPU_REGS_NO_KIQ)
1260
1261#define RREG32_KIQ(reg) amdgpu_kiq_rreg(adev, (reg), 0)
1262#define WREG32_KIQ(reg, v) amdgpu_kiq_wreg(adev, (reg), (v), 0)
1263
1264#define RREG8(reg) amdgpu_mm_rreg8(adev, (reg))
1265#define WREG8(reg, v) amdgpu_mm_wreg8(adev, (reg), (v))
1266
1267#define RREG32(reg) amdgpu_device_rreg(adev, (reg), 0)
1268#define DREG32(reg) printk(KERN_INFO "REGISTER: " #reg " : 0x%08X\n", amdgpu_device_rreg(adev, (reg), 0))
1269#define WREG32(reg, v) amdgpu_device_wreg(adev, (reg), (v), 0)
1270#define REG_SET(FIELD, v) (((v) << FIELD##_SHIFT) & FIELD##_MASK)
1271#define REG_GET(FIELD, v) (((v) << FIELD##_SHIFT) & FIELD##_MASK)
1272#define RREG32_XCC(reg, inst) amdgpu_device_xcc_rreg(adev, (reg), 0, inst)
1273#define WREG32_XCC(reg, v, inst) amdgpu_device_xcc_wreg(adev, (reg), (v), 0, inst)
1274#define RREG32_PCIE(reg) adev->pcie_rreg(adev, (reg))
1275#define WREG32_PCIE(reg, v) adev->pcie_wreg(adev, (reg), (v))
1276#define RREG32_PCIE_PORT(reg) adev->pciep_rreg(adev, (reg))
1277#define WREG32_PCIE_PORT(reg, v) adev->pciep_wreg(adev, (reg), (v))
1278#define RREG32_PCIE_EXT(reg) adev->pcie_rreg_ext(adev, (reg))
1279#define WREG32_PCIE_EXT(reg, v) adev->pcie_wreg_ext(adev, (reg), (v))
1280#define RREG64_PCIE(reg) adev->pcie_rreg64(adev, (reg))
1281#define WREG64_PCIE(reg, v) adev->pcie_wreg64(adev, (reg), (v))
1282#define RREG64_PCIE_EXT(reg) adev->pcie_rreg64_ext(adev, (reg))
1283#define WREG64_PCIE_EXT(reg, v) adev->pcie_wreg64_ext(adev, (reg), (v))
1284#define RREG32_SMC(reg) adev->smc_rreg(adev, (reg))
1285#define WREG32_SMC(reg, v) adev->smc_wreg(adev, (reg), (v))
1286#define RREG32_UVD_CTX(reg) adev->uvd_ctx_rreg(adev, (reg))
1287#define WREG32_UVD_CTX(reg, v) adev->uvd_ctx_wreg(adev, (reg), (v))
1288#define RREG32_DIDT(reg) adev->didt_rreg(adev, (reg))
1289#define WREG32_DIDT(reg, v) adev->didt_wreg(adev, (reg), (v))
1290#define RREG32_GC_CAC(reg) adev->gc_cac_rreg(adev, (reg))
1291#define WREG32_GC_CAC(reg, v) adev->gc_cac_wreg(adev, (reg), (v))
1292#define RREG32_SE_CAC(reg) adev->se_cac_rreg(adev, (reg))
1293#define WREG32_SE_CAC(reg, v) adev->se_cac_wreg(adev, (reg), (v))
1294#define RREG32_AUDIO_ENDPT(block, reg) adev->audio_endpt_rreg(adev, (block), (reg))
1295#define WREG32_AUDIO_ENDPT(block, reg, v) adev->audio_endpt_wreg(adev, (block), (reg), (v))
1296#define WREG32_P(reg, val, mask) \
1297 do { \
1298 uint32_t tmp_ = RREG32(reg); \
1299 tmp_ &= (mask); \
1300 tmp_ |= ((val) & ~(mask)); \
1301 WREG32(reg, tmp_); \
1302 } while (0)
1303#define WREG32_AND(reg, and) WREG32_P(reg, 0, and)
1304#define WREG32_OR(reg, or) WREG32_P(reg, or, ~(or))
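/*
 * Note the mask convention of WREG32_P(): 'mask' selects the bits to keep
 * from the current register value and 'val' supplies the bits outside the
 * mask. A sketch, with reg_offset/FIELD names purely illustrative:
 *
 *	WREG32_P(reg_offset, v << FIELD__SHIFT, ~FIELD_MASK);
 *
 * writes field FIELD to 'v' while preserving every other bit, and
 * WREG32_OR(reg_offset, BIT(3)) / WREG32_AND(reg_offset, ~BIT(3)) set and
 * clear bit 3 respectively.
 */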
1305#define WREG32_PLL_P(reg, val, mask) \
1306 do { \
1307 uint32_t tmp_ = RREG32_PLL(reg); \
1308 tmp_ &= (mask); \
1309 tmp_ |= ((val) & ~(mask)); \
1310 WREG32_PLL(reg, tmp_); \
1311 } while (0)
1312
1313#define WREG32_SMC_P(_Reg, _Val, _Mask) \
1314 do { \
1315 u32 tmp = RREG32_SMC(_Reg); \
1316 tmp &= (_Mask); \
1317 tmp |= ((_Val) & ~(_Mask)); \
1318 WREG32_SMC(_Reg, tmp); \
1319 } while (0)
1320
1321#define DREG32_SYS(sqf, adev, reg) seq_printf((sqf), #reg " : 0x%08X\n", amdgpu_device_rreg((adev), (reg), false))
1322
1323#define REG_FIELD_SHIFT(reg, field) reg##__##field##__SHIFT
1324#define REG_FIELD_MASK(reg, field) reg##__##field##_MASK
1325
1326#define REG_SET_FIELD(orig_val, reg, field, field_val) \
1327 (((orig_val) & ~REG_FIELD_MASK(reg, field)) | \
1328 (REG_FIELD_MASK(reg, field) & ((field_val) << REG_FIELD_SHIFT(reg, field))))
1329
1330#define REG_GET_FIELD(value, reg, field) \
1331 (((value) & REG_FIELD_MASK(reg, field)) >> REG_FIELD_SHIFT(reg, field))
1332
1333#define WREG32_FIELD(reg, field, val) \
1334 WREG32(mm##reg, (RREG32(mm##reg) & ~REG_FIELD_MASK(reg, field)) | (val) << REG_FIELD_SHIFT(reg, field))
1335
1336#define WREG32_FIELD_OFFSET(reg, offset, field, val) \
1337 WREG32(mm##reg + offset, (RREG32(mm##reg + offset) & ~REG_FIELD_MASK(reg, field)) | (val) << REG_FIELD_SHIFT(reg, field))
1338
1339#define AMDGPU_GET_REG_FIELD(x, h, l) (((x) & GENMASK_ULL(h, l)) >> (l))
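/*
 * Usage sketch for the field helpers (CP_RB0_CNTL/RB_BUFSZ and reg_offset
 * are only illustrative); the <reg>__<field>__SHIFT and <reg>__<field>_MASK
 * symbols come from the generated register headers:
 *
 *	u32 tmp = RREG32(reg_offset);
 *
 *	tmp = REG_SET_FIELD(tmp, CP_RB0_CNTL, RB_BUFSZ, rb_bufsz);
 *	WREG32(reg_offset, tmp);
 *
 *	rb_bufsz = REG_GET_FIELD(tmp, CP_RB0_CNTL, RB_BUFSZ);
 */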
1340/*
1341 * BIOS helpers.
1342 */
1343#define RBIOS8(i) (adev->bios[i])
1344#define RBIOS16(i) (RBIOS8(i) | (RBIOS8((i)+1) << 8))
1345#define RBIOS32(i) ((RBIOS16(i)) | (RBIOS16((i)+2) << 16))
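/*
 * RBIOS16()/RBIOS32() assemble little-endian values from the mapped VBIOS
 * image. For example (an assumption about the image layout, shown only as an
 * illustration), the ATOM ROM header offset is conventionally stored as a
 * 16-bit value at offset 0x48 of the image:
 *
 *	u16 rom_header_off = RBIOS16(0x48);
 */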
1346
1347/*
1348 * ASICs macro.
1349 */
1350#define amdgpu_asic_set_vga_state(adev, state) \
1351 ((adev)->asic_funcs->set_vga_state ? (adev)->asic_funcs->set_vga_state((adev), (state)) : 0)
1352#define amdgpu_asic_reset(adev) (adev)->asic_funcs->reset((adev))
1353#define amdgpu_asic_reset_method(adev) (adev)->asic_funcs->reset_method((adev))
1354#define amdgpu_asic_get_xclk(adev) (adev)->asic_funcs->get_xclk((adev))
1355#define amdgpu_asic_set_uvd_clocks(adev, v, d) (adev)->asic_funcs->set_uvd_clocks((adev), (v), (d))
1356#define amdgpu_asic_set_vce_clocks(adev, ev, ec) (adev)->asic_funcs->set_vce_clocks((adev), (ev), (ec))
1357#define amdgpu_get_pcie_lanes(adev) (adev)->asic_funcs->get_pcie_lanes((adev))
1358#define amdgpu_set_pcie_lanes(adev, l) (adev)->asic_funcs->set_pcie_lanes((adev), (l))
1359#define amdgpu_asic_get_gpu_clock_counter(adev) (adev)->asic_funcs->get_gpu_clock_counter((adev))
1360#define amdgpu_asic_read_disabled_bios(adev) (adev)->asic_funcs->read_disabled_bios((adev))
1361#define amdgpu_asic_read_bios_from_rom(adev, b, l) (adev)->asic_funcs->read_bios_from_rom((adev), (b), (l))
1362#define amdgpu_asic_read_register(adev, se, sh, offset, v)((adev)->asic_funcs->read_register((adev), (se), (sh), (offset), (v)))
1363#define amdgpu_asic_get_config_memsize(adev) (adev)->asic_funcs->get_config_memsize((adev))
1364#define amdgpu_asic_flush_hdp(adev, r) \
1365 ((adev)->asic_funcs->flush_hdp ? (adev)->asic_funcs->flush_hdp((adev), (r)) : (adev)->hdp.funcs->flush_hdp((adev), (r)))
1366#define amdgpu_asic_invalidate_hdp(adev, r) \
1367 ((adev)->asic_funcs->invalidate_hdp ? (adev)->asic_funcs->invalidate_hdp((adev), (r)) : \
1368 ((adev)->hdp.funcs->invalidate_hdp ? (adev)->hdp.funcs->invalidate_hdp((adev), (r)) : (void)0))
1369#define amdgpu_asic_need_full_reset(adev) (adev)->asic_funcs->need_full_reset((adev))
1370#define amdgpu_asic_init_doorbell_index(adev) (adev)->asic_funcs->init_doorbell_index((adev))
1371#define amdgpu_asic_get_pcie_usage(adev, cnt0, cnt1) ((adev)->asic_funcs->get_pcie_usage((adev), (cnt0), (cnt1)))
1372#define amdgpu_asic_need_reset_on_init(adev) (adev)->asic_funcs->need_reset_on_init((adev))
1373#define amdgpu_asic_get_pcie_replay_count(adev) ((adev)->asic_funcs->get_pcie_replay_count((adev)))
1374#define amdgpu_asic_supports_baco(adev) (adev)->asic_funcs->supports_baco((adev))
1375#define amdgpu_asic_pre_asic_init(adev) (adev)->asic_funcs->pre_asic_init((adev))
1376#define amdgpu_asic_update_umd_stable_pstate(adev, enter) \
1377 ((adev)->asic_funcs->update_umd_stable_pstate ? (adev)->asic_funcs->update_umd_stable_pstate((adev), (enter)) : 0)
1378#define amdgpu_asic_query_video_codecs(adev, e, c) (adev)->asic_funcs->query_video_codecs((adev), (e), (c))
1379
1380#define amdgpu_inc_vram_lost(adev) atomic_inc(&((adev)->vram_lost_counter))
1381
1382#define BIT_MASK_UPPER(i) ((i) >= BITS_PER_LONG ? 0 : ~0UL << (i))
1383#define for_each_inst(i, inst_mask) \
1384 for (i = ffs(inst_mask); i-- != 0; \
1385 i = ffs(inst_mask & BIT_MASK_UPPER(i + 1)))
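/*
 * for_each_inst() visits the 0-based positions of the bits set in inst_mask
 * in ascending order (ffs() is 1-based, hence the i-- in the condition).
 * Sketch, where inst_mask could be e.g. adev->aid_mask:
 *
 *	int i;
 *
 *	for_each_inst(i, inst_mask)
 *		dev_info(adev->dev, "instance %d present\n", i);
 */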
1386
1387/* Common functions */
1388bool amdgpu_device_has_job_running(struct amdgpu_device *adev);
1389bool amdgpu_device_should_recover_gpu(struct amdgpu_device *adev);
1390int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
1391 struct amdgpu_job *job,
1392 struct amdgpu_reset_context *reset_context);
1393void amdgpu_device_pci_config_reset(struct amdgpu_device *adev);
1394int amdgpu_device_pci_reset(struct amdgpu_device *adev);
1395bool amdgpu_device_need_post(struct amdgpu_device *adev);
1396bool amdgpu_device_seamless_boot_supported(struct amdgpu_device *adev);
1397bool amdgpu_device_should_use_aspm(struct amdgpu_device *adev);
1398
1399void amdgpu_cs_report_moved_bytes(struct amdgpu_device *adev, u64 num_bytes,
1400 u64 num_vis_bytes);
1401int amdgpu_device_resize_fb_bar(struct amdgpu_device *adev);
1402void amdgpu_device_program_register_sequence(struct amdgpu_device *adev,
1403 const u32 *registers,
1404 const u32 array_size);
1405
1406int amdgpu_device_mode1_reset(struct amdgpu_device *adev);
1407bool amdgpu_device_supports_atpx(struct drm_device *dev);
1408bool amdgpu_device_supports_px(struct drm_device *dev);
1409bool amdgpu_device_supports_boco(struct drm_device *dev);
1410bool amdgpu_device_supports_smart_shift(struct drm_device *dev);
1411bool amdgpu_device_supports_baco(struct drm_device *dev);
1412bool amdgpu_device_is_peer_accessible(struct amdgpu_device *adev,
1413 struct amdgpu_device *peer_adev);
1414int amdgpu_device_baco_enter(struct drm_device *dev);
1415int amdgpu_device_baco_exit(struct drm_device *dev);
1416
1417void amdgpu_device_flush_hdp(struct amdgpu_device *adev,
1418 struct amdgpu_ring *ring);
1419void amdgpu_device_invalidate_hdp(struct amdgpu_device *adev,
1420 struct amdgpu_ring *ring);
1421
1422void amdgpu_device_halt(struct amdgpu_device *adev);
1423u32 amdgpu_device_pcie_port_rreg(struct amdgpu_device *adev,
1424 u32 reg);
1425void amdgpu_device_pcie_port_wreg(struct amdgpu_device *adev,
1426 u32 reg, u32 v);
1427struct dma_fence *amdgpu_device_switch_gang(struct amdgpu_device *adev,
1428 struct dma_fence *gang);
1429bool amdgpu_device_has_display_hardware(struct amdgpu_device *adev);
1430
1431/* atpx handler */
1432#if defined(CONFIG_VGA_SWITCHEROO)
1433void amdgpu_register_atpx_handler(void);
1434void amdgpu_unregister_atpx_handler(void);
1435bool amdgpu_has_atpx_dgpu_power_cntl(void);
1436bool amdgpu_is_atpx_hybrid(void);
1437bool amdgpu_atpx_dgpu_req_power_for_displays(void);
1438bool amdgpu_has_atpx(void);
1439#else
1440static inline void amdgpu_register_atpx_handler(void) {}
1441static inline void amdgpu_unregister_atpx_handler(void) {}
1442static inline bool amdgpu_has_atpx_dgpu_power_cntl(void) { return false; }
1443static inline bool amdgpu_is_atpx_hybrid(void) { return false; }
1444static inline bool amdgpu_atpx_dgpu_req_power_for_displays(void) { return false; }
1445static inline bool amdgpu_has_atpx(void) { return false; }
1446#endif
1447
1448#if defined(CONFIG_VGA_SWITCHEROO) && defined(CONFIG_ACPI)
1449void *amdgpu_atpx_get_dhandle(void);
1450#else
1451static inline void *amdgpu_atpx_get_dhandle(void) { return NULL; }
1452#endif
1453
1454/*
1455 * KMS
1456 */
1457extern const struct drm_ioctl_desc amdgpu_ioctls_kms[];
1458extern const int amdgpu_max_kms_ioctl;
1459
1460int amdgpu_driver_load_kms(struct amdgpu_device *adev, unsigned long flags);
1461void amdgpu_driver_unload_kms(struct drm_device *dev);
1462void amdgpu_driver_lastclose_kms(struct drm_device *dev);
1463int amdgpu_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv);
1464void amdgpu_driver_postclose_kms(struct drm_device *dev,
1465 struct drm_file *file_priv);
1466void amdgpu_driver_release_kms(struct drm_device *dev);
1467
1468int amdgpu_device_ip_suspend(struct amdgpu_device *adev);
1469int amdgpu_device_prepare(struct drm_device *dev);
1470int amdgpu_device_suspend(struct drm_device *dev, bool fbcon);
1471int amdgpu_device_resume(struct drm_device *dev, bool fbcon);
1472u32 amdgpu_get_vblank_counter_kms(struct drm_crtc *crtc);
1473int amdgpu_enable_vblank_kms(struct drm_crtc *crtc);
1474void amdgpu_disable_vblank_kms(struct drm_crtc *crtc);
1475int amdgpu_info_ioctl(struct drm_device *dev, void *data,
1476 struct drm_file *filp);
1477
1478/*
1479 * functions used by amdgpu_encoder.c
1480 */
1481struct amdgpu_afmt_acr {
1482 u32 clock;
1483
1484 int n_32khz;
1485 int cts_32khz;
1486
1487 int n_44_1khz;
1488 int cts_44_1khz;
1489
1490 int n_48khz;
1491 int cts_48khz;
1492
1493};
1494
1495struct amdgpu_afmt_acr amdgpu_afmt_acr(uint32_t clock);
1496
1497/* amdgpu_acpi.c */
1498
1499struct amdgpu_numa_info {
1500 uint64_t size;
1501 int pxm;
1502 int nid;
1503};
1504
1505/* ATCS Device/Driver State */
1506#define AMDGPU_ATCS_PSC_DEV_STATE_D0 0
1507#define AMDGPU_ATCS_PSC_DEV_STATE_D3_HOT 3
1508#define AMDGPU_ATCS_PSC_DRV_STATE_OPR 0
1509#define AMDGPU_ATCS_PSC_DRV_STATE_NOT_OPR 1
1510
1511#if defined(CONFIG_ACPI)
1512int amdgpu_acpi_init(struct amdgpu_device *adev);
1513void amdgpu_acpi_fini(struct amdgpu_device *adev);
1514bool amdgpu_acpi_is_pcie_performance_request_supported(struct amdgpu_device *adev);
1515bool amdgpu_acpi_is_power_shift_control_supported(void);
1516int amdgpu_acpi_pcie_performance_request(struct amdgpu_device *adev,
1517 u8 perf_req, bool advertise);
1518int amdgpu_acpi_power_shift_control(struct amdgpu_device *adev,
1519 u8 dev_state, bool drv_state);
1520int amdgpu_acpi_smart_shift_update(struct drm_device *dev, enum amdgpu_ss ss_state);
1521int amdgpu_acpi_pcie_notify_device_ready(struct amdgpu_device *adev);
1522int amdgpu_acpi_get_tmr_info(struct amdgpu_device *adev, u64 *tmr_offset,
1523 u64 *tmr_size);
1524int amdgpu_acpi_get_mem_info(struct amdgpu_device *adev, int xcc_id,
1525 struct amdgpu_numa_info *numa_info);
1526
1527void amdgpu_acpi_get_backlight_caps(struct amdgpu_dm_backlight_caps *caps);
1528bool amdgpu_acpi_should_gpu_reset(struct amdgpu_device *adev);
1529void amdgpu_acpi_detect(void);
1530void amdgpu_acpi_release(void);
1531#else
1532static inline int amdgpu_acpi_init(struct amdgpu_device *adev) { return 0; }
1533static inline int amdgpu_acpi_get_tmr_info(struct amdgpu_device *adev,
1534 u64 *tmr_offset, u64 *tmr_size)
1535{
1536 return -EINVAL;
1537}
1538static inline int amdgpu_acpi_get_mem_info(struct amdgpu_device *adev,
1539 int xcc_id,
1540 struct amdgpu_numa_info *numa_info)
1541{
1542 return -EINVAL;
1543}
1544static inline void amdgpu_acpi_fini(struct amdgpu_device *adev) { }
1545static inline bool amdgpu_acpi_should_gpu_reset(struct amdgpu_device *adev) { return false; }
1546static inline void amdgpu_acpi_detect(void) { }
1547static inline void amdgpu_acpi_release(void) { }
1548static inline bool amdgpu_acpi_is_power_shift_control_supported(void) { return false; }
1549static inline int amdgpu_acpi_power_shift_control(struct amdgpu_device *adev,
1550 u8 dev_state, bool drv_state) { return 0; }
1551static inline int amdgpu_acpi_smart_shift_update(struct drm_device *dev,
1552 enum amdgpu_ss ss_state) { return 0; }
1553#endif
1554
1555#if defined(CONFIG_ACPI) && defined(CONFIG_SUSPEND)
1556bool amdgpu_acpi_is_s3_active(struct amdgpu_device *adev);
1557bool amdgpu_acpi_is_s0ix_active(struct amdgpu_device *adev);
1558void amdgpu_choose_low_power_state(struct amdgpu_device *adev);
1559#else
1560static inline bool amdgpu_acpi_is_s0ix_active(struct amdgpu_device *adev) { return false; }
1561static inline bool amdgpu_acpi_is_s3_active(struct amdgpu_device *adev) { return false; }
1562static inline void amdgpu_choose_low_power_state(struct amdgpu_device *adev) { }
1563#endif
1564
1565#if defined(CONFIG_DRM_AMD_DC)
 1566int amdgpu_dm_display_resume(struct amdgpu_device *adev);
1567#else
1568static inline int amdgpu_dm_display_resume(struct amdgpu_device *adev) { return 0; }
1569#endif
1570
115#if defined(CONFIG_DRM_AMD_ISP)
116#include "amdgpu_isp.h"
117#endif
118
#define GFX_SLICE_PERIOD_MS 250
127
128struct amdgpu_mgpu_info {
129 struct amdgpu_gpu_instance gpu_ins[MAX_GPU_INSTANCE];
130 struct mutex mutex;
131 uint32_t num_gpu;
132 uint32_t num_dgpu;
133 uint32_t num_apu;
134};
135
136enum amdgpu_ss {
137 AMDGPU_SS_DRV_LOAD,
138 AMDGPU_SS_DEV_D0,
139 AMDGPU_SS_DEV_D3,
140 AMDGPU_SS_DRV_UNLOAD
141};
142
143struct amdgpu_hwip_reg_entry {
144 u32 hwip;
145 u32 inst;
146 u32 seg;
147 u32 reg_offset;
148 const char *reg_name;
149};
150
151struct amdgpu_watchdog_timer {
152 bool timeout_fatal_disable;
153 uint32_t period; /* maxCycles = (1 << period), the number of cycles before a timeout */
154};
155
156#define AMDGPU_MAX_TIMEOUT_PARAM_LENGTH 256
157
158/*
 * Module parameters.
160 */
161extern int amdgpu_modeset;
162extern unsigned int amdgpu_vram_limit;
163extern int amdgpu_vis_vram_limit;
164extern int amdgpu_gart_size;
165extern int amdgpu_gtt_size;
166extern int amdgpu_moverate;
167extern int amdgpu_audio;
168extern int amdgpu_disp_priority;
169extern int amdgpu_hw_i2c;
170extern int amdgpu_pcie_gen2;
171extern int amdgpu_msi;
172extern char amdgpu_lockup_timeout[AMDGPU_MAX_TIMEOUT_PARAM_LENGTH];
173extern int amdgpu_dpm;
174extern int amdgpu_fw_load_type;
175extern int amdgpu_aspm;
176extern int amdgpu_runtime_pm;
177extern uint amdgpu_ip_block_mask;
178extern int amdgpu_bapm;
179extern int amdgpu_deep_color;
180extern int amdgpu_vm_size;
181extern int amdgpu_vm_block_size;
182extern int amdgpu_vm_fragment_size;
183extern int amdgpu_vm_fault_stop;
184extern int amdgpu_vm_debug;
185extern int amdgpu_vm_update_mode;
186extern int amdgpu_exp_hw_support;
187extern int amdgpu_dc;
188extern int amdgpu_sched_jobs;
189extern int amdgpu_sched_hw_submission;
190extern uint amdgpu_pcie_gen_cap;
191extern uint amdgpu_pcie_lane_cap;
192extern u64 amdgpu_cg_mask;
193extern uint amdgpu_pg_mask;
194extern uint amdgpu_sdma_phase_quantum;
195extern char *amdgpu_disable_cu;
196extern char *amdgpu_virtual_display;
197extern uint amdgpu_pp_feature_mask;
198extern uint amdgpu_force_long_training;
199extern int amdgpu_lbpw;
200extern int amdgpu_compute_multipipe;
201extern int amdgpu_gpu_recovery;
202extern int amdgpu_emu_mode;
203extern uint amdgpu_smu_memory_pool_size;
204extern int amdgpu_smu_pptable_id;
205extern uint amdgpu_dc_feature_mask;
206extern uint amdgpu_freesync_vid_mode;
207extern uint amdgpu_dc_debug_mask;
208extern uint amdgpu_dc_visual_confirm;
209extern int amdgpu_dm_abm_level;
210extern int amdgpu_backlight;
211extern int amdgpu_damage_clips;
212extern struct amdgpu_mgpu_info mgpu_info;
213extern int amdgpu_ras_enable;
214extern uint amdgpu_ras_mask;
215extern int amdgpu_bad_page_threshold;
216extern bool amdgpu_ignore_bad_page_threshold;
217extern struct amdgpu_watchdog_timer amdgpu_watchdog_timer;
218extern int amdgpu_async_gfx_ring;
219extern int amdgpu_mcbp;
220extern int amdgpu_discovery;
221extern int amdgpu_mes;
222extern int amdgpu_mes_log_enable;
223extern int amdgpu_mes_kiq;
224extern int amdgpu_uni_mes;
225extern int amdgpu_noretry;
226extern int amdgpu_force_asic_type;
227extern int amdgpu_smartshift_bias;
228extern int amdgpu_use_xgmi_p2p;
229extern int amdgpu_mtype_local;
230extern bool enforce_isolation;
231#ifdef CONFIG_HSA_AMD
232extern int sched_policy;
233extern bool debug_evictions;
234extern bool no_system_mem_limit;
235extern int halt_if_hws_hang;
236extern uint amdgpu_svm_default_granularity;
237#else
238static const int __maybe_unused sched_policy = KFD_SCHED_POLICY_HWS;
239static const bool __maybe_unused debug_evictions; /* = false */
240static const bool __maybe_unused no_system_mem_limit;
241static const int __maybe_unused halt_if_hws_hang;
242#endif
243#ifdef CONFIG_HSA_AMD_P2P
244extern bool pcie_p2p;
245#endif
246
247extern int amdgpu_tmz;
248extern int amdgpu_reset_method;
249
250#ifdef CONFIG_DRM_AMDGPU_SI
251extern int amdgpu_si_support;
252#endif
253#ifdef CONFIG_DRM_AMDGPU_CIK
254extern int amdgpu_cik_support;
255#endif
256extern int amdgpu_num_kcq;
257
258#define AMDGPU_VCNFW_LOG_SIZE (32 * 1024)
259#define AMDGPU_UMSCHFW_LOG_SIZE (32 * 1024)
260extern int amdgpu_vcnfw_log;
261extern int amdgpu_sg_display;
262extern int amdgpu_umsch_mm;
263extern int amdgpu_seamless;
264extern int amdgpu_umsch_mm_fwlog;
265
266extern int amdgpu_user_partt_mode;
267extern int amdgpu_agp;
268
269extern int amdgpu_wbrf;
270
271#define AMDGPU_VM_MAX_NUM_CTX 4096
272#define AMDGPU_SG_THRESHOLD (256*1024*1024)
273#define AMDGPU_WAIT_IDLE_TIMEOUT_IN_MS 3000
274#define AMDGPU_MAX_USEC_TIMEOUT 100000 /* 100 ms */
275#define AMDGPU_FENCE_JIFFIES_TIMEOUT (HZ / 2)
276#define AMDGPU_DEBUGFS_MAX_COMPONENTS 32
277#define AMDGPUFB_CONN_LIMIT 4
278#define AMDGPU_BIOS_NUM_SCRATCH 16
279
280#define AMDGPU_VBIOS_VGA_ALLOCATION (9 * 1024 * 1024) /* reserve 8MB for vga emulator and 1 MB for FB */
281
282/* hard reset data */
283#define AMDGPU_ASIC_RESET_DATA 0x39d5e86b
284
285/* reset flags */
286#define AMDGPU_RESET_GFX (1 << 0)
287#define AMDGPU_RESET_COMPUTE (1 << 1)
288#define AMDGPU_RESET_DMA (1 << 2)
289#define AMDGPU_RESET_CP (1 << 3)
290#define AMDGPU_RESET_GRBM (1 << 4)
291#define AMDGPU_RESET_DMA1 (1 << 5)
292#define AMDGPU_RESET_RLC (1 << 6)
293#define AMDGPU_RESET_SEM (1 << 7)
294#define AMDGPU_RESET_IH (1 << 8)
295#define AMDGPU_RESET_VMC (1 << 9)
296#define AMDGPU_RESET_MC (1 << 10)
297#define AMDGPU_RESET_DISPLAY (1 << 11)
298#define AMDGPU_RESET_UVD (1 << 12)
299#define AMDGPU_RESET_VCE (1 << 13)
300#define AMDGPU_RESET_VCE1 (1 << 14)
301
302/* reset mask */
303#define AMDGPU_RESET_TYPE_FULL (1 << 0) /* full adapter reset, mode1/mode2/BACO/etc. */
304#define AMDGPU_RESET_TYPE_SOFT_RESET (1 << 1) /* IP level soft reset */
305#define AMDGPU_RESET_TYPE_PER_QUEUE (1 << 2) /* per queue */
306#define AMDGPU_RESET_TYPE_PER_PIPE (1 << 3) /* per pipe */
307
308/* max cursor sizes (in pixels) */
309#define CIK_CURSOR_WIDTH 128
310#define CIK_CURSOR_HEIGHT 128
311
312/* smart shift bias level limits */
313#define AMDGPU_SMARTSHIFT_MAX_BIAS (100)
314#define AMDGPU_SMARTSHIFT_MIN_BIAS (-100)
315
316/* Extra time delay(in ms) to eliminate the influence of temperature momentary fluctuation */
317#define AMDGPU_SWCTF_EXTRA_DELAY 50
318
319struct amdgpu_xcp_mgr;
320struct amdgpu_device;
321struct amdgpu_irq_src;
322struct amdgpu_fpriv;
323struct amdgpu_bo_va_mapping;
324struct kfd_vm_fault_info;
325struct amdgpu_hive_info;
326struct amdgpu_reset_context;
327struct amdgpu_reset_control;
328
329enum amdgpu_cp_irq {
330 AMDGPU_CP_IRQ_GFX_ME0_PIPE0_EOP = 0,
331 AMDGPU_CP_IRQ_GFX_ME0_PIPE1_EOP,
332 AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE0_EOP,
333 AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE1_EOP,
334 AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE2_EOP,
335 AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE3_EOP,
336 AMDGPU_CP_IRQ_COMPUTE_MEC2_PIPE0_EOP,
337 AMDGPU_CP_IRQ_COMPUTE_MEC2_PIPE1_EOP,
338 AMDGPU_CP_IRQ_COMPUTE_MEC2_PIPE2_EOP,
339 AMDGPU_CP_IRQ_COMPUTE_MEC2_PIPE3_EOP,
340
341 AMDGPU_CP_IRQ_LAST
342};
343
344enum amdgpu_thermal_irq {
345 AMDGPU_THERMAL_IRQ_LOW_TO_HIGH = 0,
346 AMDGPU_THERMAL_IRQ_HIGH_TO_LOW,
347
348 AMDGPU_THERMAL_IRQ_LAST
349};
350
351enum amdgpu_kiq_irq {
352 AMDGPU_CP_KIQ_IRQ_DRIVER0 = 0,
353 AMDGPU_CP_KIQ_IRQ_LAST
354};
355#define SRIOV_USEC_TIMEOUT 1200000 /* wait 12 * 100ms for SRIOV */
356#define MAX_KIQ_REG_WAIT 5000 /* in usecs, 5ms */
357#define MAX_KIQ_REG_BAILOUT_INTERVAL 5 /* in msecs, 5ms */
358#define MAX_KIQ_REG_TRY 1000
359
360int amdgpu_device_ip_set_clockgating_state(void *dev,
361 enum amd_ip_block_type block_type,
362 enum amd_clockgating_state state);
363int amdgpu_device_ip_set_powergating_state(void *dev,
364 enum amd_ip_block_type block_type,
365 enum amd_powergating_state state);
366void amdgpu_device_ip_get_clockgating_state(struct amdgpu_device *adev,
367 u64 *flags);
368int amdgpu_device_ip_wait_for_idle(struct amdgpu_device *adev,
369 enum amd_ip_block_type block_type);
370bool amdgpu_device_ip_is_valid(struct amdgpu_device *adev,
371 enum amd_ip_block_type block_type);
372int amdgpu_ip_block_suspend(struct amdgpu_ip_block *ip_block);
373
374int amdgpu_ip_block_resume(struct amdgpu_ip_block *ip_block);
375
376#define AMDGPU_MAX_IP_NUM 16
377
378struct amdgpu_ip_block_status {
379 bool valid;
380 bool sw;
381 bool hw;
382 bool late_initialized;
383 bool hang;
384};
385
386struct amdgpu_ip_block_version {
387 const enum amd_ip_block_type type;
388 const u32 major;
389 const u32 minor;
390 const u32 rev;
391 const struct amd_ip_funcs *funcs;
392};
393
394struct amdgpu_ip_block {
395 struct amdgpu_ip_block_status status;
396 const struct amdgpu_ip_block_version *version;
397 struct amdgpu_device *adev;
398};
399
400int amdgpu_device_ip_block_version_cmp(struct amdgpu_device *adev,
401 enum amd_ip_block_type type,
402 u32 major, u32 minor);
403
404struct amdgpu_ip_block *
405amdgpu_device_ip_get_ip_block(struct amdgpu_device *adev,
406 enum amd_ip_block_type type);
407
408int amdgpu_device_ip_block_add(struct amdgpu_device *adev,
409 const struct amdgpu_ip_block_version *ip_block_version);
410
411/*
412 * BIOS.
413 */
414bool amdgpu_get_bios(struct amdgpu_device *adev);
415bool amdgpu_read_bios(struct amdgpu_device *adev);
416bool amdgpu_soc15_read_bios_from_rom(struct amdgpu_device *adev,
417 u8 *bios, u32 length_bytes);
418/*
419 * Clocks
420 */
421
422#define AMDGPU_MAX_PPLL 3
423
424struct amdgpu_clock {
425 struct amdgpu_pll ppll[AMDGPU_MAX_PPLL];
426 struct amdgpu_pll spll;
427 struct amdgpu_pll mpll;
	/* 10 kHz units */
429 uint32_t default_mclk;
430 uint32_t default_sclk;
431 uint32_t default_dispclk;
432 uint32_t current_dispclk;
433 uint32_t dp_extclk;
434 uint32_t max_pixel_clock;
435};
436
/* Sub-allocation manager; it has to be protected by another lock.
 * By design this is a helper for other parts of the driver,
 * like the indirect buffer or semaphore, which both have their
 * own locking.
 *
 * The principle is simple: we keep a list of sub-allocations in
 * offset order (first entry has offset == 0, last entry has the
 * highest offset).
 *
 * When allocating a new object we first check if there is room at
 * the end, i.e. total_size - (last_object_offset + last_object_size)
 * >= alloc_size. If so, we allocate the new object there.
 *
 * When there is not enough room at the end, we start waiting for
 * each sub object until we reach object_offset + object_size >=
 * alloc_size; this object then becomes the sub object we return.
 *
 * Alignment can't be bigger than the page size.
 *
 * Holes are not considered for allocation to keep things simple.
 * The assumption is that there won't be holes (all objects use the
 * same alignment).
 */
460
461struct amdgpu_sa_manager {
462 struct drm_suballoc_manager base;
463 struct amdgpu_bo *bo;
464 uint64_t gpu_addr;
465 void *cpu_ptr;
466};
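/*
 * Illustrative sketch of the "room at the end" check described above; the
 * helper name below is hypothetical and not part of the driver:
 *
 *	static bool sa_fits_at_end(uint64_t total_size, uint64_t last_offset,
 *				   uint64_t last_size, uint64_t alloc_size)
 *	{
 *		return total_size - (last_offset + last_size) >= alloc_size;
 *	}
 */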
467
468int amdgpu_fence_slab_init(void);
469void amdgpu_fence_slab_fini(void);
470
471/*
472 * IRQS.
473 */
474
475struct amdgpu_flip_work {
476 struct delayed_work flip_work;
477 struct work_struct unpin_work;
478 struct amdgpu_device *adev;
479 int crtc_id;
480 u32 target_vblank;
481 uint64_t base;
482 struct drm_pending_vblank_event *event;
483 struct amdgpu_bo *old_abo;
484 unsigned shared_count;
485 struct dma_fence **shared;
486 struct dma_fence_cb cb;
487 bool async;
488};
489
490
491/*
492 * file private structure
493 */
494
495struct amdgpu_fpriv {
496 struct amdgpu_vm vm;
497 struct amdgpu_bo_va *prt_va;
498 struct amdgpu_bo_va *csa_va;
499 struct amdgpu_bo_va *seq64_va;
500 struct mutex bo_list_lock;
501 struct idr bo_list_handles;
502 struct amdgpu_ctx_mgr ctx_mgr;
503 /** GPU partition selection */
504 uint32_t xcp_id;
505};
506
507int amdgpu_file_to_fpriv(struct file *filp, struct amdgpu_fpriv **fpriv);
508
509/*
510 * Writeback
511 */
512#define AMDGPU_MAX_WB 1024 /* Reserve at most 1024 WB slots for amdgpu-owned rings. */
513
514struct amdgpu_wb {
515 struct amdgpu_bo *wb_obj;
516 volatile uint32_t *wb;
517 uint64_t gpu_addr;
518 u32 num_wb; /* Number of wb slots actually reserved for amdgpu. */
519 unsigned long used[DIV_ROUND_UP(AMDGPU_MAX_WB, BITS_PER_LONG)];
520 spinlock_t lock;
521};
522
523int amdgpu_device_wb_get(struct amdgpu_device *adev, u32 *wb);
524void amdgpu_device_wb_free(struct amdgpu_device *adev, u32 wb);
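/*
 * Typical usage sketch (error handling trimmed). It assumes, as elsewhere in
 * the driver, that each writeback slot is one 32-bit dword, so the GPU
 * address of slot N is adev->wb.gpu_addr + N * 4:
 *
 *	u32 wb_idx;
 *
 *	if (!amdgpu_device_wb_get(adev, &wb_idx)) {
 *		u64 wb_gpu_addr = adev->wb.gpu_addr + (wb_idx * 4);
 *		u32 value = adev->wb.wb[wb_idx];	// CPU-side view of the slot
 *		...
 *		amdgpu_device_wb_free(adev, wb_idx);
 *	}
 */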
525
526/*
527 * Benchmarking
528 */
529int amdgpu_benchmark(struct amdgpu_device *adev, int test_number);
530
531/*
532 * ASIC specific register table accessible by UMD
533 */
534struct amdgpu_allowed_register_entry {
535 uint32_t reg_offset;
536 bool grbm_indexed;
537};
538
/**
 * enum amd_reset_method - Methods for resetting AMD GPU devices
 *
 * @AMD_RESET_METHOD_NONE: The device will not be reset.
 * @AMD_RESET_METHOD_LEGACY: Method reserved for SI, CIK and VI ASICs.
 * @AMD_RESET_METHOD_MODE0: Reset the entire ASIC. Not currently available
 *                          for any device.
 * @AMD_RESET_METHOD_MODE1: Resets all IP blocks on the ASIC (SDMA, GFX,
 *                          VCN, etc.) individually. Suitable only for some
 *                          discrete GPUs, not available for all ASICs.
 * @AMD_RESET_METHOD_MODE2: Resets a lesser level of IPs compared to MODE1.
 *                          Which IPs are reset depends on the ASIC. Notably
 *                          doesn't reset IPs shared with the CPU on APUs or
 *                          the memory controllers (so VRAM is not lost).
 *                          Not available on all ASICs.
 * @AMD_RESET_METHOD_BACO: BACO (Bus Alive, Chip Off) powers the card off
 *                         and back on without powering off the PCI bus.
 *                         Suitable only for discrete GPUs.
 * @AMD_RESET_METHOD_PCI: Does a full bus reset through the core Linux PCI
 *                        reset support, performing either a secondary bus
 *                        reset or an FLR depending on what the underlying
 *                        hardware supports.
 * @AMD_RESET_METHOD_ON_INIT: Reset performed as part of device
 *                            initialization, before normal operation starts.
 *
 * Methods available to the AMD GPU driver for resetting the device. Not all
 * methods are suitable for every device. The user can override the method
 * with the `reset_method` module parameter.
 */
564enum amd_reset_method {
565 AMD_RESET_METHOD_NONE = -1,
566 AMD_RESET_METHOD_LEGACY = 0,
567 AMD_RESET_METHOD_MODE0,
568 AMD_RESET_METHOD_MODE1,
569 AMD_RESET_METHOD_MODE2,
570 AMD_RESET_METHOD_BACO,
571 AMD_RESET_METHOD_PCI,
572 AMD_RESET_METHOD_ON_INIT,
573};
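/*
 * For example, assuming the amdgpu.reset_method module parameter maps
 * directly onto the enum values above, BACO could be requested with:
 *
 *	modprobe amdgpu reset_method=4
 */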
574
575struct amdgpu_video_codec_info {
576 u32 codec_type;
577 u32 max_width;
578 u32 max_height;
579 u32 max_pixels_per_frame;
580 u32 max_level;
581};
582
583#define codec_info_build(type, width, height, level) \
584 .codec_type = type,\
585 .max_width = width,\
586 .max_height = height,\
587 .max_pixels_per_frame = height * width,\
588 .max_level = level,
589
590struct amdgpu_video_codecs {
591 const u32 codec_count;
592 const struct amdgpu_video_codec_info *codec_array;
593};
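/*
 * Illustrative use of codec_info_build(); the codec index is taken from
 * drm/amdgpu_drm.h, while the width/height/level limits below are example
 * values only:
 *
 *	static const struct amdgpu_video_codec_info example_decode_array[] = {
 *		{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC,
 *				  8192, 4352, 186)},
 *	};
 *
 *	static const struct amdgpu_video_codecs example_decode = {
 *		.codec_count = ARRAY_SIZE(example_decode_array),
 *		.codec_array = example_decode_array,
 *	};
 */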
594
595/*
596 * ASIC specific functions.
597 */
598struct amdgpu_asic_funcs {
599 bool (*read_disabled_bios)(struct amdgpu_device *adev);
600 bool (*read_bios_from_rom)(struct amdgpu_device *adev,
601 u8 *bios, u32 length_bytes);
602 int (*read_register)(struct amdgpu_device *adev, u32 se_num,
603 u32 sh_num, u32 reg_offset, u32 *value);
604 void (*set_vga_state)(struct amdgpu_device *adev, bool state);
605 int (*reset)(struct amdgpu_device *adev);
606 enum amd_reset_method (*reset_method)(struct amdgpu_device *adev);
607 /* get the reference clock */
608 u32 (*get_xclk)(struct amdgpu_device *adev);
609 /* MM block clocks */
610 int (*set_uvd_clocks)(struct amdgpu_device *adev, u32 vclk, u32 dclk);
611 int (*set_vce_clocks)(struct amdgpu_device *adev, u32 evclk, u32 ecclk);
612 /* static power management */
613 int (*get_pcie_lanes)(struct amdgpu_device *adev);
614 void (*set_pcie_lanes)(struct amdgpu_device *adev, int lanes);
615 /* get config memsize register */
616 u32 (*get_config_memsize)(struct amdgpu_device *adev);
617 /* flush hdp write queue */
618 void (*flush_hdp)(struct amdgpu_device *adev, struct amdgpu_ring *ring);
619 /* invalidate hdp read cache */
620 void (*invalidate_hdp)(struct amdgpu_device *adev,
621 struct amdgpu_ring *ring);
	/* check if the asic needs a full reset or if a soft reset will work */
623 bool (*need_full_reset)(struct amdgpu_device *adev);
	/* initialize doorbell layout for a specific asic */
625 void (*init_doorbell_index)(struct amdgpu_device *adev);
626 /* PCIe bandwidth usage */
627 void (*get_pcie_usage)(struct amdgpu_device *adev, uint64_t *count0,
628 uint64_t *count1);
629 /* do we need to reset the asic at init time (e.g., kexec) */
630 bool (*need_reset_on_init)(struct amdgpu_device *adev);
631 /* PCIe replay counter */
632 uint64_t (*get_pcie_replay_count)(struct amdgpu_device *adev);
633 /* device supports BACO */
634 int (*supports_baco)(struct amdgpu_device *adev);
635 /* pre asic_init quirks */
636 void (*pre_asic_init)(struct amdgpu_device *adev);
637 /* enter/exit umd stable pstate */
638 int (*update_umd_stable_pstate)(struct amdgpu_device *adev, bool enter);
639 /* query video codecs */
640 int (*query_video_codecs)(struct amdgpu_device *adev, bool encode,
641 const struct amdgpu_video_codecs **codecs);
642 /* encode "> 32bits" smn addressing */
643 u64 (*encode_ext_smn_addressing)(int ext_id);
644
645 ssize_t (*get_reg_state)(struct amdgpu_device *adev,
646 enum amdgpu_reg_state reg_state, void *buf,
647 size_t max_size);
648};
649
650/*
651 * IOCTL.
652 */
653int amdgpu_bo_list_ioctl(struct drm_device *dev, void *data,
654 struct drm_file *filp);
655
656int amdgpu_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp);
657int amdgpu_cs_fence_to_handle_ioctl(struct drm_device *dev, void *data,
658 struct drm_file *filp);
659int amdgpu_cs_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *filp);
660int amdgpu_cs_wait_fences_ioctl(struct drm_device *dev, void *data,
661 struct drm_file *filp);
662
663/* VRAM scratch page for HDP bug, default vram page */
664struct amdgpu_mem_scratch {
665 struct amdgpu_bo *robj;
666 volatile uint32_t *ptr;
667 u64 gpu_addr;
668};
669
670/*
671 * CGS
672 */
673struct cgs_device *amdgpu_cgs_create_device(struct amdgpu_device *adev);
674void amdgpu_cgs_destroy_device(struct cgs_device *cgs_device);
675
676/*
677 * Core structure, functions and helpers.
678 */
679typedef uint32_t (*amdgpu_rreg_t)(struct amdgpu_device*, uint32_t);
680typedef void (*amdgpu_wreg_t)(struct amdgpu_device*, uint32_t, uint32_t);
681
682typedef uint32_t (*amdgpu_rreg_ext_t)(struct amdgpu_device*, uint64_t);
683typedef void (*amdgpu_wreg_ext_t)(struct amdgpu_device*, uint64_t, uint32_t);
684
685typedef uint64_t (*amdgpu_rreg64_t)(struct amdgpu_device*, uint32_t);
686typedef void (*amdgpu_wreg64_t)(struct amdgpu_device*, uint32_t, uint64_t);
687
688typedef uint64_t (*amdgpu_rreg64_ext_t)(struct amdgpu_device*, uint64_t);
689typedef void (*amdgpu_wreg64_ext_t)(struct amdgpu_device*, uint64_t, uint64_t);
690
691typedef uint32_t (*amdgpu_block_rreg_t)(struct amdgpu_device*, uint32_t, uint32_t);
692typedef void (*amdgpu_block_wreg_t)(struct amdgpu_device*, uint32_t, uint32_t, uint32_t);
693
694struct amdgpu_mmio_remap {
695 u32 reg_offset;
696 resource_size_t bus_addr;
697};
698
/* Define the HW IP blocks used in the driver; add more if necessary */
700enum amd_hw_ip_block_type {
701 GC_HWIP = 1,
702 HDP_HWIP,
703 SDMA0_HWIP,
704 SDMA1_HWIP,
705 SDMA2_HWIP,
706 SDMA3_HWIP,
707 SDMA4_HWIP,
708 SDMA5_HWIP,
709 SDMA6_HWIP,
710 SDMA7_HWIP,
711 LSDMA_HWIP,
712 MMHUB_HWIP,
713 ATHUB_HWIP,
714 NBIO_HWIP,
715 MP0_HWIP,
716 MP1_HWIP,
717 UVD_HWIP,
718 VCN_HWIP = UVD_HWIP,
719 JPEG_HWIP = VCN_HWIP,
720 VCN1_HWIP,
721 VCE_HWIP,
722 VPE_HWIP,
723 DF_HWIP,
724 DCE_HWIP,
725 OSSSYS_HWIP,
726 SMUIO_HWIP,
727 PWR_HWIP,
728 NBIF_HWIP,
729 THM_HWIP,
730 CLK_HWIP,
731 UMC_HWIP,
732 RSMU_HWIP,
733 XGMI_HWIP,
734 DCI_HWIP,
735 PCIE_HWIP,
736 ISP_HWIP,
737 MAX_HWIP
738};
739
740#define HWIP_MAX_INSTANCE 44
741
742#define HW_ID_MAX 300
743#define IP_VERSION_FULL(mj, mn, rv, var, srev) \
744 (((mj) << 24) | ((mn) << 16) | ((rv) << 8) | ((var) << 4) | (srev))
745#define IP_VERSION(mj, mn, rv) IP_VERSION_FULL(mj, mn, rv, 0, 0)
746#define IP_VERSION_MAJ(ver) ((ver) >> 24)
747#define IP_VERSION_MIN(ver) (((ver) >> 16) & 0xFF)
748#define IP_VERSION_REV(ver) (((ver) >> 8) & 0xFF)
749#define IP_VERSION_VARIANT(ver) (((ver) >> 4) & 0xF)
750#define IP_VERSION_SUBREV(ver) ((ver) & 0xF)
751#define IP_VERSION_MAJ_MIN_REV(ver) ((ver) >> 8)
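/*
 * For example, IP_VERSION(9, 4, 3) packs to 0x09040300; IP_VERSION_MAJ()
 * then yields 9, IP_VERSION_MIN() 4, IP_VERSION_REV() 3, and
 * IP_VERSION_MAJ_MIN_REV() 0x090403.
 */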
752
753struct amdgpu_ip_map_info {
754 /* Map of logical to actual dev instances/mask */
755 uint32_t dev_inst[MAX_HWIP][HWIP_MAX_INSTANCE];
756 int8_t (*logical_to_dev_inst)(struct amdgpu_device *adev,
757 enum amd_hw_ip_block_type block,
758 int8_t inst);
759 uint32_t (*logical_to_dev_mask)(struct amdgpu_device *adev,
760 enum amd_hw_ip_block_type block,
761 uint32_t mask);
762};
763
764struct amd_powerplay {
765 void *pp_handle;
766 const struct amd_pm_funcs *pp_funcs;
767};
768
769struct ip_discovery_top;
770
771/* polaris10 kickers */
772#define ASICID_IS_P20(did, rid) (((did == 0x67DF) && \
773 ((rid == 0xE3) || \
774 (rid == 0xE4) || \
775 (rid == 0xE5) || \
776 (rid == 0xE7) || \
777 (rid == 0xEF))) || \
778 ((did == 0x6FDF) && \
779 ((rid == 0xE7) || \
780 (rid == 0xEF) || \
781 (rid == 0xFF))))
782
783#define ASICID_IS_P30(did, rid) ((did == 0x67DF) && \
784 ((rid == 0xE1) || \
785 (rid == 0xF7)))
786
787/* polaris11 kickers */
788#define ASICID_IS_P21(did, rid) (((did == 0x67EF) && \
789 ((rid == 0xE0) || \
790 (rid == 0xE5))) || \
791 ((did == 0x67FF) && \
792 ((rid == 0xCF) || \
793 (rid == 0xEF) || \
794 (rid == 0xFF))))
795
796#define ASICID_IS_P31(did, rid) ((did == 0x67EF) && \
797 ((rid == 0xE2)))
798
799/* polaris12 kickers */
800#define ASICID_IS_P23(did, rid) (((did == 0x6987) && \
801 ((rid == 0xC0) || \
802 (rid == 0xC1) || \
803 (rid == 0xC3) || \
804 (rid == 0xC7))) || \
805 ((did == 0x6981) && \
806 ((rid == 0x00) || \
807 (rid == 0x01) || \
808 (rid == 0x10))))
809
810struct amdgpu_mqd_prop {
811 uint64_t mqd_gpu_addr;
812 uint64_t hqd_base_gpu_addr;
813 uint64_t rptr_gpu_addr;
814 uint64_t wptr_gpu_addr;
815 uint32_t queue_size;
816 bool use_doorbell;
817 uint32_t doorbell_index;
818 uint64_t eop_gpu_addr;
819 uint32_t hqd_pipe_priority;
820 uint32_t hqd_queue_priority;
821 bool allow_tunneling;
822 bool hqd_active;
823};
824
825struct amdgpu_mqd {
826 unsigned mqd_size;
827 int (*init_mqd)(struct amdgpu_device *adev, void *mqd,
828 struct amdgpu_mqd_prop *p);
829};
830
/*
 * Custom init levels can be defined for situations where a full
 * initialization of all hardware blocks is not expected. Sample cases are
 * custom init sequences after resume from S0i3/S3, reset on initialization,
 * partial reset of blocks, etc. Levels are described in the corresponding
 * struct definitions, e.g. amdgpu_init_default and amdgpu_init_minimal_xgmi.
 */
839enum amdgpu_init_lvl_id {
840 AMDGPU_INIT_LEVEL_DEFAULT,
841 AMDGPU_INIT_LEVEL_MINIMAL_XGMI,
842 AMDGPU_INIT_LEVEL_RESET_RECOVERY,
843};
844
845struct amdgpu_init_level {
846 enum amdgpu_init_lvl_id level;
847 uint32_t hwini_ip_block_mask;
848};
849
850#define AMDGPU_RESET_MAGIC_NUM 64
851#define AMDGPU_MAX_DF_PERFMONS 4
852struct amdgpu_reset_domain;
853struct amdgpu_fru_info;
854
855/*
856 * Non-zero (true) if the GPU has VRAM. Zero (false) otherwise.
857 */
858#define AMDGPU_HAS_VRAM(_adev) ((_adev)->gmc.real_vram_size)
859
860struct amdgpu_device {
861 struct device *dev;
862 struct pci_dev *pdev;
863 struct drm_device ddev;
864
865#ifdef CONFIG_DRM_AMD_ACP
866 struct amdgpu_acp acp;
867#endif
868 struct amdgpu_hive_info *hive;
869 struct amdgpu_xcp_mgr *xcp_mgr;
870 /* ASIC */
871 enum amd_asic_type asic_type;
872 uint32_t family;
873 uint32_t rev_id;
874 uint32_t external_rev_id;
875 unsigned long flags;
876 unsigned long apu_flags;
877 int usec_timeout;
878 const struct amdgpu_asic_funcs *asic_funcs;
879 bool shutdown;
880 bool need_swiotlb;
881 bool accel_working;
882 struct notifier_block acpi_nb;
883 struct amdgpu_i2c_chan *i2c_bus[AMDGPU_MAX_I2C_BUS];
884 struct debugfs_blob_wrapper debugfs_vbios_blob;
885 struct debugfs_blob_wrapper debugfs_discovery_blob;
886 struct mutex srbm_mutex;
887 /* GRBM index mutex. Protects concurrent access to GRBM index */
888 struct mutex grbm_idx_mutex;
889 struct dev_pm_domain vga_pm_domain;
890 bool have_disp_power_ref;
891 bool have_atomics_support;
892
893 /* BIOS */
894 bool is_atom_fw;
895 uint8_t *bios;
896 uint32_t bios_size;
897 uint32_t bios_scratch_reg_offset;
898 uint32_t bios_scratch[AMDGPU_BIOS_NUM_SCRATCH];
899
900 /* Register/doorbell mmio */
901 resource_size_t rmmio_base;
902 resource_size_t rmmio_size;
903 void __iomem *rmmio;
904 /* protects concurrent MM_INDEX/DATA based register access */
905 spinlock_t mmio_idx_lock;
906 struct amdgpu_mmio_remap rmmio_remap;
907 /* protects concurrent SMC based register access */
908 spinlock_t smc_idx_lock;
909 amdgpu_rreg_t smc_rreg;
910 amdgpu_wreg_t smc_wreg;
911 /* protects concurrent PCIE register access */
912 spinlock_t pcie_idx_lock;
913 amdgpu_rreg_t pcie_rreg;
914 amdgpu_wreg_t pcie_wreg;
915 amdgpu_rreg_t pciep_rreg;
916 amdgpu_wreg_t pciep_wreg;
917 amdgpu_rreg_ext_t pcie_rreg_ext;
918 amdgpu_wreg_ext_t pcie_wreg_ext;
919 amdgpu_rreg64_t pcie_rreg64;
920 amdgpu_wreg64_t pcie_wreg64;
921 amdgpu_rreg64_ext_t pcie_rreg64_ext;
922 amdgpu_wreg64_ext_t pcie_wreg64_ext;
923 /* protects concurrent UVD register access */
924 spinlock_t uvd_ctx_idx_lock;
925 amdgpu_rreg_t uvd_ctx_rreg;
926 amdgpu_wreg_t uvd_ctx_wreg;
927 /* protects concurrent DIDT register access */
928 spinlock_t didt_idx_lock;
929 amdgpu_rreg_t didt_rreg;
930 amdgpu_wreg_t didt_wreg;
931 /* protects concurrent gc_cac register access */
932 spinlock_t gc_cac_idx_lock;
933 amdgpu_rreg_t gc_cac_rreg;
934 amdgpu_wreg_t gc_cac_wreg;
935 /* protects concurrent se_cac register access */
936 spinlock_t se_cac_idx_lock;
937 amdgpu_rreg_t se_cac_rreg;
938 amdgpu_wreg_t se_cac_wreg;
939 /* protects concurrent ENDPOINT (audio) register access */
940 spinlock_t audio_endpt_idx_lock;
941 amdgpu_block_rreg_t audio_endpt_rreg;
942 amdgpu_block_wreg_t audio_endpt_wreg;
943 struct amdgpu_doorbell doorbell;
944
945 /* clock/pll info */
946 struct amdgpu_clock clock;
947
948 /* MC */
949 struct amdgpu_gmc gmc;
950 struct amdgpu_gart gart;
951 dma_addr_t dummy_page_addr;
952 struct amdgpu_vm_manager vm_manager;
953 struct amdgpu_vmhub vmhub[AMDGPU_MAX_VMHUBS];
954 DECLARE_BITMAP(vmhubs_mask, AMDGPU_MAX_VMHUBS);
955
956 /* memory management */
957 struct amdgpu_mman mman;
958 struct amdgpu_mem_scratch mem_scratch;
959 struct amdgpu_wb wb;
960 atomic64_t num_bytes_moved;
961 atomic64_t num_evictions;
962 atomic64_t num_vram_cpu_page_faults;
963 atomic_t gpu_reset_counter;
964 atomic_t vram_lost_counter;
965
966 /* data for buffer migration throttling */
967 struct {
968 spinlock_t lock;
969 s64 last_update_us;
970 s64 accum_us; /* accumulated microseconds */
971 s64 accum_us_vis; /* for visible VRAM */
972 u32 log2_max_MBps;
973 } mm_stats;
974
975 /* display */
976 bool enable_virtual_display;
977 struct amdgpu_vkms_output *amdgpu_vkms_output;
978 struct amdgpu_mode_info mode_info;
979 /* For pre-DCE11. DCE11 and later are in "struct amdgpu_device->dm" */
980 struct delayed_work hotplug_work;
981 struct amdgpu_irq_src crtc_irq;
982 struct amdgpu_irq_src vline0_irq;
983 struct amdgpu_irq_src vupdate_irq;
984 struct amdgpu_irq_src pageflip_irq;
985 struct amdgpu_irq_src hpd_irq;
986 struct amdgpu_irq_src dmub_trace_irq;
987 struct amdgpu_irq_src dmub_outbox_irq;
988
989 /* rings */
990 u64 fence_context;
991 unsigned num_rings;
992 struct amdgpu_ring *rings[AMDGPU_MAX_RINGS];
993 struct dma_fence __rcu *gang_submit;
994 bool ib_pool_ready;
995 struct amdgpu_sa_manager ib_pools[AMDGPU_IB_POOL_MAX];
996 struct amdgpu_sched gpu_sched[AMDGPU_HW_IP_NUM][AMDGPU_RING_PRIO_MAX];
997
998 /* interrupts */
999 struct amdgpu_irq irq;
1000
1001 /* powerplay */
1002 struct amd_powerplay powerplay;
1003 struct amdgpu_pm pm;
1004 u64 cg_flags;
1005 u32 pg_flags;
1006
1007 /* nbio */
1008 struct amdgpu_nbio nbio;
1009
1010 /* hdp */
1011 struct amdgpu_hdp hdp;
1012
1013 /* smuio */
1014 struct amdgpu_smuio smuio;
1015
1016 /* mmhub */
1017 struct amdgpu_mmhub mmhub;
1018
1019 /* gfxhub */
1020 struct amdgpu_gfxhub gfxhub;
1021
1022 /* gfx */
1023 struct amdgpu_gfx gfx;
1024
1025 /* sdma */
1026 struct amdgpu_sdma sdma;
1027
1028 /* lsdma */
1029 struct amdgpu_lsdma lsdma;
1030
1031 /* uvd */
1032 struct amdgpu_uvd uvd;
1033
1034 /* vce */
1035 struct amdgpu_vce vce;
1036
1037 /* vcn */
1038 struct amdgpu_vcn vcn;
1039
1040 /* jpeg */
1041 struct amdgpu_jpeg jpeg;
1042
1043 /* vpe */
1044 struct amdgpu_vpe vpe;
1045
1046 /* umsch */
1047 struct amdgpu_umsch_mm umsch_mm;
1048 bool enable_umsch_mm;
1049
1050 /* firmwares */
1051 struct amdgpu_firmware firmware;
1052
1053 /* PSP */
1054 struct psp_context psp;
1055
1056 /* GDS */
1057 struct amdgpu_gds gds;
1058
1059 /* for userq and VM fences */
1060 struct amdgpu_seq64 seq64;
1061
1062 /* KFD */
1063 struct amdgpu_kfd_dev kfd;
1064
1065 /* UMC */
1066 struct amdgpu_umc umc;
1067
1068 /* display related functionality */
1069 struct amdgpu_display_manager dm;
1070
1071#if defined(CONFIG_DRM_AMD_ISP)
1072 /* isp */
1073 struct amdgpu_isp isp;
1074#endif
1075
1076 /* mes */
1077 bool enable_mes;
1078 bool enable_mes_kiq;
1079 bool enable_uni_mes;
1080 struct amdgpu_mes mes;
1081 struct amdgpu_mqd mqds[AMDGPU_HW_IP_NUM];
1082
1083 /* df */
1084 struct amdgpu_df df;
1085
1086 /* MCA */
1087 struct amdgpu_mca mca;
1088
1089 /* ACA */
1090 struct amdgpu_aca aca;
1091
1092 struct amdgpu_ip_block ip_blocks[AMDGPU_MAX_IP_NUM];
1093 uint32_t harvest_ip_mask;
1094 int num_ip_blocks;
1095 struct mutex mn_lock;
1096 DECLARE_HASHTABLE(mn_hash, 7);
1097
1098 /* tracking pinned memory */
1099 atomic64_t vram_pin_size;
1100 atomic64_t visible_pin_size;
1101 atomic64_t gart_pin_size;
1102
1103 /* soc15 register offset based on ip, instance and segment */
1104 uint32_t *reg_offset[MAX_HWIP][HWIP_MAX_INSTANCE];
1105 struct amdgpu_ip_map_info ip_map;
1106
1107 /* delayed work_func for deferring clockgating during resume */
1108 struct delayed_work delayed_init_work;
1109
1110 struct amdgpu_virt virt;
1111
1112 /* record hw reset is performed */
1113 bool has_hw_reset;
1114 u8 reset_magic[AMDGPU_RESET_MAGIC_NUM];
1115
1116 /* s3/s4 mask */
1117 bool in_suspend;
1118 bool in_s3;
1119 bool in_s4;
1120 bool in_s0ix;
1121
1122 enum pp_mp1_state mp1_state;
1123 struct amdgpu_doorbell_index doorbell_index;
1124
1125 struct mutex notifier_lock;
1126
1127 int asic_reset_res;
1128 struct work_struct xgmi_reset_work;
1129 struct list_head reset_list;
1130
1131 long gfx_timeout;
1132 long sdma_timeout;
1133 long video_timeout;
1134 long compute_timeout;
1135 long psp_timeout;
1136
1137 uint64_t unique_id;
1138 uint64_t df_perfmon_config_assign_mask[AMDGPU_MAX_DF_PERFMONS];
1139
1140 /* enable runtime pm on the device */
1141 bool in_runpm;
1142 bool has_pr3;
1143
1144 bool ucode_sysfs_en;
1145
1146 struct amdgpu_fru_info *fru_info;
1147 atomic_t throttling_logging_enabled;
1148 struct ratelimit_state throttling_logging_rs;
1149 uint32_t ras_hw_enabled;
1150 uint32_t ras_enabled;
1151
1152 bool no_hw_access;
1153 struct pci_saved_state *pci_state;
1154 pci_channel_state_t pci_channel_state;
1155
1156 /* Track auto wait count on s_barrier settings */
1157 bool barrier_has_auto_waitcnt;
1158
1159 struct amdgpu_reset_control *reset_cntl;
1160 uint32_t ip_versions[MAX_HWIP][HWIP_MAX_INSTANCE];
1161
1162 bool ram_is_direct_mapped;
1163
1164 struct list_head ras_list;
1165
1166 struct ip_discovery_top *ip_top;
1167
1168 struct amdgpu_reset_domain *reset_domain;
1169
1170 struct mutex benchmark_mutex;
1171
1172 bool scpm_enabled;
1173 uint32_t scpm_status;
1174
1175 struct work_struct reset_work;
1176
1177 bool job_hang;
1178 bool dc_enabled;
1179 /* Mask of active clusters */
1180 uint32_t aid_mask;
1181
1182 /* Debug */
1183 bool debug_vm;
1184 bool debug_largebar;
1185 bool debug_disable_soft_recovery;
1186 bool debug_use_vram_fw_buf;
1187 bool debug_enable_ras_aca;
1188 bool debug_exp_resets;
1189
1190 bool enforce_isolation[MAX_XCP];
1191 /* Added this mutex for cleaner shader isolation between GFX and compute processes */
1192 struct mutex enforce_isolation_mutex;
1193
1194 struct amdgpu_init_level *init_lvl;
1195};
1196
1197static inline uint32_t amdgpu_ip_version(const struct amdgpu_device *adev,
1198 uint8_t ip, uint8_t inst)
1199{
1200 /* This considers only major/minor/rev and ignores
1201 * subrevision/variant fields.
1202 */
1203 return adev->ip_versions[ip][inst] & ~0xFFU;
1204}
1205
1206static inline uint32_t amdgpu_ip_version_full(const struct amdgpu_device *adev,
1207 uint8_t ip, uint8_t inst)
1208{
1209 /* This returns full version - major/minor/rev/variant/subrevision */
1210 return adev->ip_versions[ip][inst];
1211}
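/*
 * The packed value compares directly against IP_VERSION(); a common pattern
 * (GC instance 0 shown purely as an example) is:
 *
 *	if (amdgpu_ip_version(adev, GC_HWIP, 0) >= IP_VERSION(11, 0, 0))
 *		// take the gfx11-or-newer path
 */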
1212
1213static inline struct amdgpu_device *drm_to_adev(struct drm_device *ddev)
1214{
1215 return container_of(ddev, struct amdgpu_device, ddev);
1216}
1217
1218static inline struct drm_device *adev_to_drm(struct amdgpu_device *adev)
1219{
1220 return &adev->ddev;
1221}
1222
1223static inline struct amdgpu_device *amdgpu_ttm_adev(struct ttm_device *bdev)
1224{
1225 return container_of(bdev, struct amdgpu_device, mman.bdev);
1226}
1227
1228int amdgpu_device_init(struct amdgpu_device *adev,
1229 uint32_t flags);
1230void amdgpu_device_fini_hw(struct amdgpu_device *adev);
1231void amdgpu_device_fini_sw(struct amdgpu_device *adev);
1232
1233int amdgpu_gpu_wait_for_idle(struct amdgpu_device *adev);
1234
1235void amdgpu_device_mm_access(struct amdgpu_device *adev, loff_t pos,
1236 void *buf, size_t size, bool write);
1237size_t amdgpu_device_aper_access(struct amdgpu_device *adev, loff_t pos,
1238 void *buf, size_t size, bool write);
1239
1240void amdgpu_device_vram_access(struct amdgpu_device *adev, loff_t pos,
1241 void *buf, size_t size, bool write);
1242uint32_t amdgpu_device_wait_on_rreg(struct amdgpu_device *adev,
1243 uint32_t inst, uint32_t reg_addr, char reg_name[],
1244 uint32_t expected_value, uint32_t mask);
1245uint32_t amdgpu_device_rreg(struct amdgpu_device *adev,
1246 uint32_t reg, uint32_t acc_flags);
1247u32 amdgpu_device_indirect_rreg_ext(struct amdgpu_device *adev,
1248 u64 reg_addr);
1249uint32_t amdgpu_device_xcc_rreg(struct amdgpu_device *adev,
1250 uint32_t reg, uint32_t acc_flags,
1251 uint32_t xcc_id);
1252void amdgpu_device_wreg(struct amdgpu_device *adev,
1253 uint32_t reg, uint32_t v,
1254 uint32_t acc_flags);
1255void amdgpu_device_indirect_wreg_ext(struct amdgpu_device *adev,
1256 u64 reg_addr, u32 reg_data);
1257void amdgpu_device_xcc_wreg(struct amdgpu_device *adev,
1258 uint32_t reg, uint32_t v,
1259 uint32_t acc_flags,
1260 uint32_t xcc_id);
1261void amdgpu_mm_wreg_mmio_rlc(struct amdgpu_device *adev,
1262 uint32_t reg, uint32_t v, uint32_t xcc_id);
1263void amdgpu_mm_wreg8(struct amdgpu_device *adev, uint32_t offset, uint8_t value);
1264uint8_t amdgpu_mm_rreg8(struct amdgpu_device *adev, uint32_t offset);
1265
1266u32 amdgpu_device_indirect_rreg(struct amdgpu_device *adev,
1267 u32 reg_addr);
1268u64 amdgpu_device_indirect_rreg64(struct amdgpu_device *adev,
1269 u32 reg_addr);
1270u64 amdgpu_device_indirect_rreg64_ext(struct amdgpu_device *adev,
1271 u64 reg_addr);
1272void amdgpu_device_indirect_wreg(struct amdgpu_device *adev,
1273 u32 reg_addr, u32 reg_data);
1274void amdgpu_device_indirect_wreg64(struct amdgpu_device *adev,
1275 u32 reg_addr, u64 reg_data);
1276void amdgpu_device_indirect_wreg64_ext(struct amdgpu_device *adev,
1277 u64 reg_addr, u64 reg_data);
1278u32 amdgpu_device_get_rev_id(struct amdgpu_device *adev);
1279bool amdgpu_device_asic_has_dc_support(enum amd_asic_type asic_type);
1280bool amdgpu_device_has_dc_support(struct amdgpu_device *adev);
1281
1282void amdgpu_device_set_sriov_virtual_display(struct amdgpu_device *adev);
1283
1284int amdgpu_device_pre_asic_reset(struct amdgpu_device *adev,
1285 struct amdgpu_reset_context *reset_context);
1286
1287int amdgpu_do_asic_reset(struct list_head *device_list_handle,
1288 struct amdgpu_reset_context *reset_context);
1289
1290int amdgpu_device_reinit_after_reset(struct amdgpu_reset_context *reset_context);
1291
1292int emu_soc_asic_init(struct amdgpu_device *adev);
1293
1294/*
1295 * Registers read & write functions.
1296 */
1297#define AMDGPU_REGS_NO_KIQ (1<<1)
1298#define AMDGPU_REGS_RLC (1<<2)
1299
1300#define RREG32_NO_KIQ(reg) amdgpu_device_rreg(adev, (reg), AMDGPU_REGS_NO_KIQ)
1301#define WREG32_NO_KIQ(reg, v) amdgpu_device_wreg(adev, (reg), (v), AMDGPU_REGS_NO_KIQ)
1302
1303#define RREG32_KIQ(reg) amdgpu_kiq_rreg(adev, (reg), 0)
1304#define WREG32_KIQ(reg, v) amdgpu_kiq_wreg(adev, (reg), (v), 0)
1305
1306#define RREG8(reg) amdgpu_mm_rreg8(adev, (reg))
1307#define WREG8(reg, v) amdgpu_mm_wreg8(adev, (reg), (v))
1308
1309#define RREG32(reg) amdgpu_device_rreg(adev, (reg), 0)
1310#define DREG32(reg) printk(KERN_INFO "REGISTER: " #reg " : 0x%08X\n", amdgpu_device_rreg(adev, (reg), 0))
1311#define WREG32(reg, v) amdgpu_device_wreg(adev, (reg), (v), 0)
1312#define REG_SET(FIELD, v) (((v) << FIELD##_SHIFT) & FIELD##_MASK)
#define REG_GET(FIELD, v) (((v) & FIELD##_MASK) >> FIELD##_SHIFT)
1314#define RREG32_XCC(reg, inst) amdgpu_device_xcc_rreg(adev, (reg), 0, inst)
1315#define WREG32_XCC(reg, v, inst) amdgpu_device_xcc_wreg(adev, (reg), (v), 0, inst)
1316#define RREG32_PCIE(reg) adev->pcie_rreg(adev, (reg))
1317#define WREG32_PCIE(reg, v) adev->pcie_wreg(adev, (reg), (v))
1318#define RREG32_PCIE_PORT(reg) adev->pciep_rreg(adev, (reg))
1319#define WREG32_PCIE_PORT(reg, v) adev->pciep_wreg(adev, (reg), (v))
1320#define RREG32_PCIE_EXT(reg) adev->pcie_rreg_ext(adev, (reg))
1321#define WREG32_PCIE_EXT(reg, v) adev->pcie_wreg_ext(adev, (reg), (v))
1322#define RREG64_PCIE(reg) adev->pcie_rreg64(adev, (reg))
1323#define WREG64_PCIE(reg, v) adev->pcie_wreg64(adev, (reg), (v))
1324#define RREG64_PCIE_EXT(reg) adev->pcie_rreg64_ext(adev, (reg))
1325#define WREG64_PCIE_EXT(reg, v) adev->pcie_wreg64_ext(adev, (reg), (v))
1326#define RREG32_SMC(reg) adev->smc_rreg(adev, (reg))
1327#define WREG32_SMC(reg, v) adev->smc_wreg(adev, (reg), (v))
1328#define RREG32_UVD_CTX(reg) adev->uvd_ctx_rreg(adev, (reg))
1329#define WREG32_UVD_CTX(reg, v) adev->uvd_ctx_wreg(adev, (reg), (v))
1330#define RREG32_DIDT(reg) adev->didt_rreg(adev, (reg))
1331#define WREG32_DIDT(reg, v) adev->didt_wreg(adev, (reg), (v))
1332#define RREG32_GC_CAC(reg) adev->gc_cac_rreg(adev, (reg))
1333#define WREG32_GC_CAC(reg, v) adev->gc_cac_wreg(adev, (reg), (v))
1334#define RREG32_SE_CAC(reg) adev->se_cac_rreg(adev, (reg))
1335#define WREG32_SE_CAC(reg, v) adev->se_cac_wreg(adev, (reg), (v))
1336#define RREG32_AUDIO_ENDPT(block, reg) adev->audio_endpt_rreg(adev, (block), (reg))
1337#define WREG32_AUDIO_ENDPT(block, reg, v) adev->audio_endpt_wreg(adev, (block), (reg), (v))
1338#define WREG32_P(reg, val, mask) \
1339 do { \
1340 uint32_t tmp_ = RREG32(reg); \
1341 tmp_ &= (mask); \
1342 tmp_ |= ((val) & ~(mask)); \
1343 WREG32(reg, tmp_); \
1344 } while (0)
1345#define WREG32_AND(reg, and) WREG32_P(reg, 0, and)
1346#define WREG32_OR(reg, or) WREG32_P(reg, or, ~(or))
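/*
 * Note that the mask argument of WREG32_P() names the bits to preserve:
 * bits cleared in the mask are replaced by the corresponding bits of val.
 * A sketch with made-up values:
 *
 *	WREG32_P(reg, 0x5, ~0xf);	// keep bits [31:4], set bits [3:0] to 0x5
 *	WREG32_AND(reg, ~0x1);		// clear bit 0
 *	WREG32_OR(reg, 0x2);		// set bit 1
 */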
1347#define WREG32_PLL_P(reg, val, mask) \
1348 do { \
1349 uint32_t tmp_ = RREG32_PLL(reg); \
1350 tmp_ &= (mask); \
1351 tmp_ |= ((val) & ~(mask)); \
1352 WREG32_PLL(reg, tmp_); \
1353 } while (0)
1354
1355#define WREG32_SMC_P(_Reg, _Val, _Mask) \
1356 do { \
1357 u32 tmp = RREG32_SMC(_Reg); \
1358 tmp &= (_Mask); \
1359 tmp |= ((_Val) & ~(_Mask)); \
1360 WREG32_SMC(_Reg, tmp); \
1361 } while (0)
1362
1363#define DREG32_SYS(sqf, adev, reg) seq_printf((sqf), #reg " : 0x%08X\n", amdgpu_device_rreg((adev), (reg), false))
1364
1365#define REG_FIELD_SHIFT(reg, field) reg##__##field##__SHIFT
1366#define REG_FIELD_MASK(reg, field) reg##__##field##_MASK
1367
1368#define REG_SET_FIELD(orig_val, reg, field, field_val) \
1369 (((orig_val) & ~REG_FIELD_MASK(reg, field)) | \
1370 (REG_FIELD_MASK(reg, field) & ((field_val) << REG_FIELD_SHIFT(reg, field))))
1371
1372#define REG_GET_FIELD(value, reg, field) \
1373 (((value) & REG_FIELD_MASK(reg, field)) >> REG_FIELD_SHIFT(reg, field))
1374
1375#define WREG32_FIELD(reg, field, val) \
1376 WREG32(mm##reg, (RREG32(mm##reg) & ~REG_FIELD_MASK(reg, field)) | (val) << REG_FIELD_SHIFT(reg, field))
1377
1378#define WREG32_FIELD_OFFSET(reg, offset, field, val) \
1379 WREG32(mm##reg + offset, (RREG32(mm##reg + offset) & ~REG_FIELD_MASK(reg, field)) | (val) << REG_FIELD_SHIFT(reg, field))
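
/*
 * Typical single-field read-modify-write; the register and field names below
 * are placeholders:
 *
 *	u32 tmp = RREG32(mmFOO_CNTL);
 *
 *	tmp = REG_SET_FIELD(tmp, FOO_CNTL, ENABLE, 1);
 *	WREG32(mmFOO_CNTL, tmp);
 *
 * which WREG32_FIELD(FOO_CNTL, ENABLE, 1) collapses into a single statement.
 */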
1380
1381#define AMDGPU_GET_REG_FIELD(x, h, l) (((x) & GENMASK_ULL(h, l)) >> (l))
1382/*
1383 * BIOS helpers.
1384 */
1385#define RBIOS8(i) (adev->bios[i])
1386#define RBIOS16(i) (RBIOS8(i) | (RBIOS8((i)+1) << 8))
1387#define RBIOS32(i) ((RBIOS16(i)) | (RBIOS16((i)+2) << 16))
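/*
 * These assemble little-endian values from the BIOS image; for instance, if
 * adev->bios[i] == 0x34 and adev->bios[i + 1] == 0x12, RBIOS16(i) evaluates
 * to 0x1234.
 */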
1388
1389/*
1390 * ASICs macro.
1391 */
1392#define amdgpu_asic_set_vga_state(adev, state) \
1393 ((adev)->asic_funcs->set_vga_state ? (adev)->asic_funcs->set_vga_state((adev), (state)) : 0)
1394#define amdgpu_asic_reset(adev) (adev)->asic_funcs->reset((adev))
1395#define amdgpu_asic_reset_method(adev) (adev)->asic_funcs->reset_method((adev))
1396#define amdgpu_asic_get_xclk(adev) (adev)->asic_funcs->get_xclk((adev))
1397#define amdgpu_asic_set_uvd_clocks(adev, v, d) (adev)->asic_funcs->set_uvd_clocks((adev), (v), (d))
1398#define amdgpu_asic_set_vce_clocks(adev, ev, ec) (adev)->asic_funcs->set_vce_clocks((adev), (ev), (ec))
1399#define amdgpu_get_pcie_lanes(adev) (adev)->asic_funcs->get_pcie_lanes((adev))
1400#define amdgpu_set_pcie_lanes(adev, l) (adev)->asic_funcs->set_pcie_lanes((adev), (l))
1401#define amdgpu_asic_get_gpu_clock_counter(adev) (adev)->asic_funcs->get_gpu_clock_counter((adev))
1402#define amdgpu_asic_read_disabled_bios(adev) (adev)->asic_funcs->read_disabled_bios((adev))
1403#define amdgpu_asic_read_bios_from_rom(adev, b, l) (adev)->asic_funcs->read_bios_from_rom((adev), (b), (l))
1404#define amdgpu_asic_read_register(adev, se, sh, offset, v)((adev)->asic_funcs->read_register((adev), (se), (sh), (offset), (v)))
1405#define amdgpu_asic_get_config_memsize(adev) (adev)->asic_funcs->get_config_memsize((adev))
1406#define amdgpu_asic_flush_hdp(adev, r) \
1407 ((adev)->asic_funcs->flush_hdp ? (adev)->asic_funcs->flush_hdp((adev), (r)) : (adev)->hdp.funcs->flush_hdp((adev), (r)))
1408#define amdgpu_asic_invalidate_hdp(adev, r) \
1409 ((adev)->asic_funcs->invalidate_hdp ? (adev)->asic_funcs->invalidate_hdp((adev), (r)) : \
1410 ((adev)->hdp.funcs->invalidate_hdp ? (adev)->hdp.funcs->invalidate_hdp((adev), (r)) : (void)0))
1411#define amdgpu_asic_need_full_reset(adev) (adev)->asic_funcs->need_full_reset((adev))
1412#define amdgpu_asic_init_doorbell_index(adev) (adev)->asic_funcs->init_doorbell_index((adev))
1413#define amdgpu_asic_get_pcie_usage(adev, cnt0, cnt1) ((adev)->asic_funcs->get_pcie_usage((adev), (cnt0), (cnt1)))
1414#define amdgpu_asic_need_reset_on_init(adev) (adev)->asic_funcs->need_reset_on_init((adev))
1415#define amdgpu_asic_get_pcie_replay_count(adev) ((adev)->asic_funcs->get_pcie_replay_count((adev)))
1416#define amdgpu_asic_supports_baco(adev) (adev)->asic_funcs->supports_baco((adev))
1417#define amdgpu_asic_pre_asic_init(adev) (adev)->asic_funcs->pre_asic_init((adev))
1418#define amdgpu_asic_update_umd_stable_pstate(adev, enter) \
1419 ((adev)->asic_funcs->update_umd_stable_pstate ? (adev)->asic_funcs->update_umd_stable_pstate((adev), (enter)) : 0)
1420#define amdgpu_asic_query_video_codecs(adev, e, c) (adev)->asic_funcs->query_video_codecs((adev), (e), (c))
1421
1422#define amdgpu_inc_vram_lost(adev) atomic_inc(&((adev)->vram_lost_counter))
1423
1424#define BIT_MASK_UPPER(i) ((i) >= BITS_PER_LONG ? 0 : ~0UL << (i))
1425#define for_each_inst(i, inst_mask) \
1426 for (i = ffs(inst_mask); i-- != 0; \
1427 i = ffs(inst_mask & BIT_MASK_UPPER(i + 1)))
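/*
 * Example sketch: with inst_mask == 0x5 the loop body runs for i == 0 and
 * i == 2, the zero-based indices of the set bits (setup_instance() is
 * hypothetical):
 *
 *	int i;
 *
 *	for_each_inst(i, inst_mask)
 *		setup_instance(adev, i);
 */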
1428
1429/* Common functions */
1430bool amdgpu_device_has_job_running(struct amdgpu_device *adev);
1431bool amdgpu_device_should_recover_gpu(struct amdgpu_device *adev);
1432int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
1433 struct amdgpu_job *job,
1434 struct amdgpu_reset_context *reset_context);
1435void amdgpu_device_pci_config_reset(struct amdgpu_device *adev);
1436int amdgpu_device_pci_reset(struct amdgpu_device *adev);
1437bool amdgpu_device_need_post(struct amdgpu_device *adev);
1438bool amdgpu_device_seamless_boot_supported(struct amdgpu_device *adev);
1439bool amdgpu_device_should_use_aspm(struct amdgpu_device *adev);
1440
1441void amdgpu_cs_report_moved_bytes(struct amdgpu_device *adev, u64 num_bytes,
1442 u64 num_vis_bytes);
1443int amdgpu_device_resize_fb_bar(struct amdgpu_device *adev);
1444void amdgpu_device_program_register_sequence(struct amdgpu_device *adev,
1445 const u32 *registers,
1446 const u32 array_size);
1447
1448int amdgpu_device_mode1_reset(struct amdgpu_device *adev);
1449bool amdgpu_device_supports_atpx(struct drm_device *dev);
1450bool amdgpu_device_supports_px(struct drm_device *dev);
1451bool amdgpu_device_supports_boco(struct drm_device *dev);
1452bool amdgpu_device_supports_smart_shift(struct drm_device *dev);
1453int amdgpu_device_supports_baco(struct drm_device *dev);
1454void amdgpu_device_detect_runtime_pm_mode(struct amdgpu_device *adev);
1455bool amdgpu_device_is_peer_accessible(struct amdgpu_device *adev,
1456 struct amdgpu_device *peer_adev);
1457int amdgpu_device_baco_enter(struct drm_device *dev);
1458int amdgpu_device_baco_exit(struct drm_device *dev);
1459
1460void amdgpu_device_flush_hdp(struct amdgpu_device *adev,
1461 struct amdgpu_ring *ring);
1462void amdgpu_device_invalidate_hdp(struct amdgpu_device *adev,
1463 struct amdgpu_ring *ring);
1464
1465void amdgpu_device_halt(struct amdgpu_device *adev);
1466u32 amdgpu_device_pcie_port_rreg(struct amdgpu_device *adev,
1467 u32 reg);
1468void amdgpu_device_pcie_port_wreg(struct amdgpu_device *adev,
1469 u32 reg, u32 v);
1470struct dma_fence *amdgpu_device_get_gang(struct amdgpu_device *adev);
1471struct dma_fence *amdgpu_device_switch_gang(struct amdgpu_device *adev,
1472 struct dma_fence *gang);
1473bool amdgpu_device_has_display_hardware(struct amdgpu_device *adev);
1474ssize_t amdgpu_get_soft_full_reset_mask(struct amdgpu_ring *ring);
1475ssize_t amdgpu_show_reset_mask(char *buf, uint32_t supported_reset);
1476
1477/* atpx handler */
1478#if defined(CONFIG_VGA_SWITCHEROO)
1479void amdgpu_register_atpx_handler(void);
1480void amdgpu_unregister_atpx_handler(void);
1481bool amdgpu_has_atpx_dgpu_power_cntl(void);
1482bool amdgpu_is_atpx_hybrid(void);
1483bool amdgpu_has_atpx(void);
1484#else
1485static inline void amdgpu_register_atpx_handler(void) {}
1486static inline void amdgpu_unregister_atpx_handler(void) {}
1487static inline bool amdgpu_has_atpx_dgpu_power_cntl(void) { return false; }
1488static inline bool amdgpu_is_atpx_hybrid(void) { return false; }
1489static inline bool amdgpu_has_atpx(void) { return false; }
1490#endif
1491
1492/*
1493 * KMS
1494 */
1495extern const struct drm_ioctl_desc amdgpu_ioctls_kms[];
1496extern const int amdgpu_max_kms_ioctl;
1497
1498int amdgpu_driver_load_kms(struct amdgpu_device *adev, unsigned long flags);
1499void amdgpu_driver_unload_kms(struct drm_device *dev);
1500int amdgpu_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv);
1501void amdgpu_driver_postclose_kms(struct drm_device *dev,
1502 struct drm_file *file_priv);
1503void amdgpu_driver_release_kms(struct drm_device *dev);
1504
1505int amdgpu_device_ip_suspend(struct amdgpu_device *adev);
1506int amdgpu_device_prepare(struct drm_device *dev);
1507int amdgpu_device_suspend(struct drm_device *dev, bool fbcon);
1508int amdgpu_device_resume(struct drm_device *dev, bool fbcon);
1509u32 amdgpu_get_vblank_counter_kms(struct drm_crtc *crtc);
1510int amdgpu_enable_vblank_kms(struct drm_crtc *crtc);
1511void amdgpu_disable_vblank_kms(struct drm_crtc *crtc);
1512int amdgpu_info_ioctl(struct drm_device *dev, void *data,
1513 struct drm_file *filp);
1514
1515/*
1516 * functions used by amdgpu_encoder.c
1517 */
1518struct amdgpu_afmt_acr {
1519 u32 clock;
1520
1521 int n_32khz;
1522 int cts_32khz;
1523
1524 int n_44_1khz;
1525 int cts_44_1khz;
1526
1527 int n_48khz;
1528 int cts_48khz;
1529
1530};
1531
1532struct amdgpu_afmt_acr amdgpu_afmt_acr(uint32_t clock);
1533
1534/* amdgpu_acpi.c */
1535
1536struct amdgpu_numa_info {
1537 uint64_t size;
1538 int pxm;
1539 int nid;
1540};
1541
1542/* ATCS Device/Driver State */
1543#define AMDGPU_ATCS_PSC_DEV_STATE_D0 0
1544#define AMDGPU_ATCS_PSC_DEV_STATE_D3_HOT 3
1545#define AMDGPU_ATCS_PSC_DRV_STATE_OPR 0
1546#define AMDGPU_ATCS_PSC_DRV_STATE_NOT_OPR 1
1547
1548#if defined(CONFIG_ACPI)
1549int amdgpu_acpi_init(struct amdgpu_device *adev);
1550void amdgpu_acpi_fini(struct amdgpu_device *adev);
1551bool amdgpu_acpi_is_pcie_performance_request_supported(struct amdgpu_device *adev);
1552bool amdgpu_acpi_is_power_shift_control_supported(void);
1553int amdgpu_acpi_pcie_performance_request(struct amdgpu_device *adev,
1554 u8 perf_req, bool advertise);
1555int amdgpu_acpi_power_shift_control(struct amdgpu_device *adev,
1556 u8 dev_state, bool drv_state);
1557int amdgpu_acpi_smart_shift_update(struct drm_device *dev, enum amdgpu_ss ss_state);
1558int amdgpu_acpi_pcie_notify_device_ready(struct amdgpu_device *adev);
1559int amdgpu_acpi_get_tmr_info(struct amdgpu_device *adev, u64 *tmr_offset,
1560 u64 *tmr_size);
1561int amdgpu_acpi_get_mem_info(struct amdgpu_device *adev, int xcc_id,
1562 struct amdgpu_numa_info *numa_info);
1563
1564void amdgpu_acpi_get_backlight_caps(struct amdgpu_dm_backlight_caps *caps);
1565bool amdgpu_acpi_should_gpu_reset(struct amdgpu_device *adev);
1566void amdgpu_acpi_detect(void);
1567void amdgpu_acpi_release(void);
1568#else
1569static inline int amdgpu_acpi_init(struct amdgpu_device *adev) { return 0; }
1570static inline int amdgpu_acpi_get_tmr_info(struct amdgpu_device *adev,
1571 u64 *tmr_offset, u64 *tmr_size)
1572{
1573 return -EINVAL;
1574}
1575static inline int amdgpu_acpi_get_mem_info(struct amdgpu_device *adev,
1576 int xcc_id,
1577 struct amdgpu_numa_info *numa_info)
1578{
1579 return -EINVAL;
1580}
1581static inline void amdgpu_acpi_fini(struct amdgpu_device *adev) { }
1582static inline bool amdgpu_acpi_should_gpu_reset(struct amdgpu_device *adev) { return false; }
1583static inline void amdgpu_acpi_detect(void) { }
1584static inline void amdgpu_acpi_release(void) { }
1585static inline bool amdgpu_acpi_is_power_shift_control_supported(void) { return false; }
1586static inline int amdgpu_acpi_power_shift_control(struct amdgpu_device *adev,
1587 u8 dev_state, bool drv_state) { return 0; }
1588static inline int amdgpu_acpi_smart_shift_update(struct drm_device *dev,
1589 enum amdgpu_ss ss_state) { return 0; }
1590static inline void amdgpu_acpi_get_backlight_caps(struct amdgpu_dm_backlight_caps *caps) { }
1591#endif
1592
1593#if defined(CONFIG_ACPI) && defined(CONFIG_SUSPEND)
1594bool amdgpu_acpi_is_s3_active(struct amdgpu_device *adev);
1595bool amdgpu_acpi_is_s0ix_active(struct amdgpu_device *adev);
1596void amdgpu_choose_low_power_state(struct amdgpu_device *adev);
1597#else
1598static inline bool amdgpu_acpi_is_s0ix_active(struct amdgpu_device *adev) { return false; }
1599static inline bool amdgpu_acpi_is_s3_active(struct amdgpu_device *adev) { return false; }
1600static inline void amdgpu_choose_low_power_state(struct amdgpu_device *adev) { }
1601#endif
1602
1603void amdgpu_register_gpu_instance(struct amdgpu_device *adev);
1604void amdgpu_unregister_gpu_instance(struct amdgpu_device *adev);
1605
1606pci_ers_result_t amdgpu_pci_error_detected(struct pci_dev *pdev,
1607 pci_channel_state_t state);
1608pci_ers_result_t amdgpu_pci_mmio_enabled(struct pci_dev *pdev);
1609pci_ers_result_t amdgpu_pci_slot_reset(struct pci_dev *pdev);
1610void amdgpu_pci_resume(struct pci_dev *pdev);
1611
1612bool amdgpu_device_cache_pci_state(struct pci_dev *pdev);
1613bool amdgpu_device_load_pci_state(struct pci_dev *pdev);
1614
1615bool amdgpu_device_skip_hw_access(struct amdgpu_device *adev);
1616
1617int amdgpu_device_set_cg_state(struct amdgpu_device *adev,
1618 enum amd_clockgating_state state);
1619int amdgpu_device_set_pg_state(struct amdgpu_device *adev,
1620 enum amd_powergating_state state);
1621
1622static inline bool amdgpu_device_has_timeouts_enabled(struct amdgpu_device *adev)
1623{
1624 return amdgpu_gpu_recovery != 0 &&
1625 adev->gfx_timeout != MAX_SCHEDULE_TIMEOUT &&
1626 adev->compute_timeout != MAX_SCHEDULE_TIMEOUT &&
1627 adev->sdma_timeout != MAX_SCHEDULE_TIMEOUT &&
1628 adev->video_timeout != MAX_SCHEDULE_TIMEOUT;
1629}
1630
1631#include "amdgpu_object.h"
1632
1633static inline bool amdgpu_is_tmz(struct amdgpu_device *adev)
1634{
1635 return adev->gmc.tmz_enabled;
1636}
1637
1638int amdgpu_in_reset(struct amdgpu_device *adev);
1639
1640extern const struct attribute_group amdgpu_vram_mgr_attr_group;
1641extern const struct attribute_group amdgpu_gtt_mgr_attr_group;
1642extern const struct attribute_group amdgpu_flash_attr_group;
1643
1644void amdgpu_set_init_level(struct amdgpu_device *adev,
1645 enum amdgpu_init_lvl_id lvl);
1646#endif