/*
 * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include <drm/amdgpu_drm.h>
#include <drm/drm_drv.h>
#include <drm/drm_gem.h>
#include <drm/drm_vblank.h>
#include "amdgpu_drv.h"

#include <drm/drm_pciids.h>
#include <linux/console.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/pm_runtime.h>
#include <linux/vga_switcheroo.h>
#include <drm/drm_probe_helper.h>
#include <linux/mmu_notifier.h>

#include "amdgpu.h"
#include "amdgpu_irq.h"
#include "amdgpu_dma_buf.h"

#include "amdgpu_amdkfd.h"

#include "amdgpu_ras.h"

/*
 * KMS wrapper.
 * - 3.0.0 - initial driver
 * - 3.1.0 - allow reading more status registers (GRBM, SRBM, SDMA, CP)
 * - 3.2.0 - GFX8: Uses EOP_TC_WB_ACTION_EN, so UMDs don't have to do the same
 *           at the end of IBs.
 * - 3.3.0 - Add VM support for UVD on supported hardware.
 * - 3.4.0 - Add AMDGPU_INFO_NUM_EVICTIONS.
 * - 3.5.0 - Add support for new UVD_NO_OP register.
 * - 3.6.0 - KMD involves use of CONTEXT_CONTROL in ring buffer.
 * - 3.7.0 - Add support for VCE clock list packet
 * - 3.8.0 - Add support for raster config init in the kernel
 * - 3.9.0 - Add support for memory query info about VRAM and GTT.
 * - 3.10.0 - Add support for new fences ioctl, new gem ioctl flags
 * - 3.11.0 - Add support for sensor query info (clocks, temp, etc).
 * - 3.12.0 - Add query for double offchip LDS buffers
 * - 3.13.0 - Add PRT support
 * - 3.14.0 - Fix race in amdgpu_ctx_get_fence() and note new functionality
 * - 3.15.0 - Export more gpu info for gfx9
 * - 3.16.0 - Add reserved vmid support
 * - 3.17.0 - Add AMDGPU_NUM_VRAM_CPU_PAGE_FAULTS.
 * - 3.18.0 - Export gpu always on cu bitmap
 * - 3.19.0 - Add support for UVD MJPEG decode
 * - 3.20.0 - Add support for local BOs
 * - 3.21.0 - Add DRM_AMDGPU_FENCE_TO_HANDLE ioctl
 * - 3.22.0 - Add DRM_AMDGPU_SCHED ioctl
 * - 3.23.0 - Add query for VRAM lost counter
 * - 3.24.0 - Add high priority compute support for gfx9
 * - 3.25.0 - Add support for sensor query info (stable pstate sclk/mclk).
 * - 3.26.0 - GFX9: Process AMDGPU_IB_FLAG_TC_WB_NOT_INVALIDATE.
 * - 3.27.0 - Add new chunk to AMDGPU_CS to enable BO_LIST creation.
 * - 3.28.0 - Add AMDGPU_CHUNK_ID_SCHEDULED_DEPENDENCIES
 * - 3.29.0 - Add AMDGPU_IB_FLAG_RESET_GDS_MAX_WAVE_ID
 * - 3.30.0 - Add AMDGPU_SCHED_OP_CONTEXT_PRIORITY_OVERRIDE.
 * - 3.31.0 - Add support for per-flip tiling attribute changes with DC
 * - 3.32.0 - Add syncobj timeline support to AMDGPU_CS.
 * - 3.33.0 - Fixes for GDS ENOMEM failures in AMDGPU_CS.
 * - 3.34.0 - Non-DC can flip correctly between buffers with different pitches
 * - 3.35.0 - Add drm_amdgpu_info_device::tcc_disabled_mask
 * - 3.36.0 - Allow reading more status registers on si/cik
 * - 3.37.0 - L2 is invalidated before SDMA IBs, needed for correctness
 * - 3.38.0 - Add AMDGPU_IB_FLAG_EMIT_MEM_SYNC
 * - 3.39.0 - DMABUF implicit sync does a full pipeline sync
 */
#define KMS_DRIVER_MAJOR	3
#define KMS_DRIVER_MINOR	39
#define KMS_DRIVER_PATCHLEVEL	0

int amdgpu_vram_limit = 0;
int amdgpu_vis_vram_limit = 0;
int amdgpu_gart_size = -1; /* auto */
int amdgpu_gtt_size = -1; /* auto */
int amdgpu_moverate = -1; /* auto */
int amdgpu_benchmarking = 0;
int amdgpu_testing = 0;
int amdgpu_audio = -1;
int amdgpu_disp_priority = 0;
int amdgpu_hw_i2c = 0;
int amdgpu_pcie_gen2 = -1;
int amdgpu_msi = -1;
char amdgpu_lockup_timeout[AMDGPU_MAX_TIMEOUT_PARAM_LENGTH];
int amdgpu_dpm = -1;
int amdgpu_fw_load_type = -1;
int amdgpu_aspm = -1;
int amdgpu_runtime_pm = -1;
uint amdgpu_ip_block_mask = 0xffffffff;
int amdgpu_bapm = -1;
int amdgpu_deep_color = 0;
int amdgpu_vm_size = -1;
int amdgpu_vm_fragment_size = -1;
int amdgpu_vm_block_size = -1;
int amdgpu_vm_fault_stop = 0;
int amdgpu_vm_debug = 0;
int amdgpu_vm_update_mode = -1;
int amdgpu_exp_hw_support = 0;
int amdgpu_dc = -1;
int amdgpu_sched_jobs = 32;
int amdgpu_sched_hw_submission = 2;
uint amdgpu_pcie_gen_cap = 0;
uint amdgpu_pcie_lane_cap = 0;
uint amdgpu_cg_mask = 0xffffffff;
uint amdgpu_pg_mask = 0xffffffff;
uint amdgpu_sdma_phase_quantum = 32;
char *amdgpu_disable_cu = NULL;
char *amdgpu_virtual_display = NULL;
/* OverDrive (bit 14) disabled by default */
uint amdgpu_pp_feature_mask = 0xffffbfff;
uint amdgpu_force_long_training = 0;
int amdgpu_job_hang_limit = 0;
int amdgpu_lbpw = -1;
int amdgpu_compute_multipipe = -1;
int amdgpu_gpu_recovery = -1; /* auto */
int amdgpu_emu_mode = 0;
uint amdgpu_smu_memory_pool_size = 0;
/* FBC (bit 0) disabled by default */
uint amdgpu_dc_feature_mask = 0;
uint amdgpu_dc_debug_mask = 0;
int amdgpu_async_gfx_ring = 1;
int amdgpu_mcbp = 0;
int amdgpu_discovery = -1;
int amdgpu_mes = 0;
int amdgpu_noretry;
int amdgpu_force_asic_type = -1;
int amdgpu_tmz = 0;
int amdgpu_reset_method = -1; /* auto */

struct amdgpu_mgpu_info mgpu_info = {
	.mutex = __MUTEX_INITIALIZER(mgpu_info.mutex),
};
int amdgpu_ras_enable = -1;
uint amdgpu_ras_mask = 0xffffffff;

/**
 * DOC: vramlimit (int)
 * Restrict the total amount of VRAM in MiB for testing. The default is 0 (Use full VRAM).
 */
MODULE_PARM_DESC(vramlimit, "Restrict VRAM for testing, in megabytes");
module_param_named(vramlimit, amdgpu_vram_limit, int, 0600);

/**
 * DOC: vis_vramlimit (int)
 * Restrict the amount of CPU visible VRAM in MiB for testing. The default is 0 (Use full CPU visible VRAM).
 */
MODULE_PARM_DESC(vis_vramlimit, "Restrict visible VRAM for testing, in megabytes");
module_param_named(vis_vramlimit, amdgpu_vis_vram_limit, int, 0444);

/**
 * DOC: gartsize (uint)
 * Restrict the size of GART in MiB (32, 64, etc.) for testing. The default is -1 (The size depends on asic).
 */
MODULE_PARM_DESC(gartsize, "Size of GART to setup in megabytes (32, 64, etc., -1=auto)");
module_param_named(gartsize, amdgpu_gart_size, uint, 0600);

/**
 * DOC: gttsize (int)
 * Restrict the size of GTT domain in MiB for testing. The default is -1 (It's VRAM size if 3GB < VRAM < 3/4 RAM,
 * otherwise 3/4 RAM size).
 */
MODULE_PARM_DESC(gttsize, "Size of the GTT domain in megabytes (-1 = auto)");
module_param_named(gttsize, amdgpu_gtt_size, int, 0600);

/**
 * DOC: moverate (int)
 * Set maximum buffer migration rate in MB/s. The default is -1 (8 MB/s).
 */
MODULE_PARM_DESC(moverate, "Maximum buffer migration rate in MB/s. (32, 64, etc., -1=auto, 0=1=disabled)");
module_param_named(moverate, amdgpu_moverate, int, 0600);

/**
 * DOC: benchmark (int)
 * Run benchmarks. The default is 0 (Skip benchmarks).
 */
MODULE_PARM_DESC(benchmark, "Run benchmark");
module_param_named(benchmark, amdgpu_benchmarking, int, 0444);

/**
 * DOC: test (int)
 * Test BO GTT->VRAM and VRAM->GTT GPU copies. The default is 0 (Skip test; set 1 to run the test).
 */
MODULE_PARM_DESC(test, "Run tests");
module_param_named(test, amdgpu_testing, int, 0444);

/**
 * DOC: audio (int)
 * Set HDMI/DP audio. Only affects non-DC display handling. The default is -1 (Enabled); set 0 to disable it.
 */
MODULE_PARM_DESC(audio, "Audio enable (-1 = auto, 0 = disable, 1 = enable)");
module_param_named(audio, amdgpu_audio, int, 0444);

/**
 * DOC: disp_priority (int)
 * Set display Priority (1 = normal, 2 = high). Only affects non-DC display handling. The default is 0 (auto).
 */
MODULE_PARM_DESC(disp_priority, "Display Priority (0 = auto, 1 = normal, 2 = high)");
module_param_named(disp_priority, amdgpu_disp_priority, int, 0444);

/**
 * DOC: hw_i2c (int)
 * Enable the hw i2c engine. Only affects non-DC display handling. The default is 0 (Disabled).
 */
MODULE_PARM_DESC(hw_i2c, "hw i2c engine enable (0 = disable)");
module_param_named(hw_i2c, amdgpu_hw_i2c, int, 0444);

/**
 * DOC: pcie_gen2 (int)
 * Disable or enable PCIE Gen2/3 mode (0 = disable, 1 = enable). The default is -1 (auto, enabled).
 */
MODULE_PARM_DESC(pcie_gen2, "PCIE Gen2 mode (-1 = auto, 0 = disable, 1 = enable)");
module_param_named(pcie_gen2, amdgpu_pcie_gen2, int, 0444);

/**
 * DOC: msi (int)
 * Disable or enable Message Signaled Interrupts (MSI) functionality (1 = enable, 0 = disable). The default is -1 (auto, enabled).
 */
MODULE_PARM_DESC(msi, "MSI support (1 = enable, 0 = disable, -1 = auto)");
module_param_named(msi, amdgpu_msi, int, 0444);

/**
 * DOC: lockup_timeout (string)
 * Set GPU scheduler timeout value in ms.
 *
 * The format can be [Non-Compute] or [GFX,Compute,SDMA,Video]. That is, there can be one or
 * multiple values specified. Values of 0 and below are invalid and will be adjusted
 * to the default timeout.
 *
 * - With one value specified, the setting will apply to all non-compute jobs.
 * - With multiple values specified, the first one will be for GFX.
 *   The second one is for Compute. The third and fourth ones are
 *   for SDMA and Video.
 *
 * By default (with no lockup_timeout setting), the timeout for all non-compute (GFX, SDMA and Video)
 * jobs is 10000 ms, and there is no timeout enforced on compute jobs.
 */
MODULE_PARM_DESC(lockup_timeout, "GPU lockup timeout in ms (default: for bare metal 10000 for non-compute jobs and infinity timeout for compute jobs; "
		"for passthrough or sriov, 10000 for all jobs."
		" 0: keep default value. negative: infinity timeout), "
		"format: for bare metal [Non-Compute] or [GFX,Compute,SDMA,Video]; "
		"for passthrough or sriov [all jobs] or [GFX,Compute,SDMA,Video].");
module_param_string(lockup_timeout, amdgpu_lockup_timeout, sizeof(amdgpu_lockup_timeout), 0444);

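/*
 * Illustrative example (editor's note, not part of the upstream source):
 * using the per-engine format documented above, a hypothetical boot option
 * such as
 *
 *   amdgpu.lockup_timeout=10000,60000,10000,10000
 *
 * would set the GFX, Compute, SDMA and Video timeouts, in that order,
 * in milliseconds.
 */
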
/**
 * DOC: dpm (int)
 * Override for dynamic power management setting
 * (0 = disable, 1 = enable, 2 = enable sw smu driver for vega20)
 * The default is -1 (auto).
 */
MODULE_PARM_DESC(dpm, "DPM support (1 = enable, 0 = disable, -1 = auto)");
module_param_named(dpm, amdgpu_dpm, int, 0444);

/**
 * DOC: fw_load_type (int)
 * Set different firmware loading type for debugging (0 = direct, 1 = SMU, 2 = PSP). The default is -1 (auto).
 */
MODULE_PARM_DESC(fw_load_type, "firmware loading type (0 = direct, 1 = SMU, 2 = PSP, -1 = auto)");
module_param_named(fw_load_type, amdgpu_fw_load_type, int, 0444);

/**
 * DOC: aspm (int)
 * Disable or enable ASPM (1 = enable, 0 = disable). The default is -1 (auto, enabled).
 */
MODULE_PARM_DESC(aspm, "ASPM support (1 = enable, 0 = disable, -1 = auto)");
module_param_named(aspm, amdgpu_aspm, int, 0444);

/**
 * DOC: runpm (int)
 * Override for runtime power management control for dGPUs in PX/HG laptops. The amdgpu driver can dynamically power down
 * the dGPU on PX/HG laptops when it is idle. The default is -1 (auto enable). Setting the value to 0 disables this functionality.
 */
MODULE_PARM_DESC(runpm, "PX runtime pm (1 = force enable, 0 = disable, -1 = PX only default)");
module_param_named(runpm, amdgpu_runtime_pm, int, 0444);

/**
 * DOC: ip_block_mask (uint)
 * Override what IP blocks are enabled on the GPU. Each GPU is a collection of IP blocks (gfx, display, video, etc.).
 * Use this parameter to disable specific blocks. Note that the IP blocks do not have a fixed index. Some asics may not have
 * some IPs or may include multiple instances of an IP so the ordering varies from asic to asic. See the driver output in
 * the kernel log for the list of IPs on the asic. The default is 0xffffffff (enable all blocks on a device).
 */
MODULE_PARM_DESC(ip_block_mask, "IP Block Mask (all blocks enabled (default))");
module_param_named(ip_block_mask, amdgpu_ip_block_mask, uint, 0444);

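/*
 * Illustrative sketch (editor's note, not part of the upstream source):
 * the mask is a plain bitfield, so clearing one bit leaves every other
 * block enabled. For example, if the kernel log listed the IP block you
 * want to skip at index 1, a hypothetical setting of
 *
 *   amdgpu.ip_block_mask=0xfffffffd
 *
 * would clear only bit 1. As noted above, the index-to-block mapping
 * varies per asic, so check the log first.
 */
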
/**
 * DOC: bapm (int)
 * Bidirectional Application Power Management (BAPM) is used to dynamically share TDP between CPU and GPU. Set the value to 0 to disable it.
 * The default is -1 (auto, enabled).
 */
MODULE_PARM_DESC(bapm, "BAPM support (1 = enable, 0 = disable, -1 = auto)");
module_param_named(bapm, amdgpu_bapm, int, 0444);

/**
 * DOC: deep_color (int)
 * Set 1 to enable Deep Color support. Only affects non-DC display handling. The default is 0 (disabled).
 */
MODULE_PARM_DESC(deep_color, "Deep Color support (1 = enable, 0 = disable (default))");
module_param_named(deep_color, amdgpu_deep_color, int, 0444);

/**
 * DOC: vm_size (int)
 * Override the size of the GPU's per client virtual address space in GiB. The default is -1 (automatic for each asic).
 */
MODULE_PARM_DESC(vm_size, "VM address space size in gigabytes (default 64GB)");
module_param_named(vm_size, amdgpu_vm_size, int, 0444);

/**
 * DOC: vm_fragment_size (int)
 * Override VM fragment size in bits (4, 5, etc. 4 = 64K, 9 = 2M). The default is -1 (automatic for each asic).
 */
MODULE_PARM_DESC(vm_fragment_size, "VM fragment size in bits (4, 5, etc. 4 = 64K (default), Max 9 = 2M)");
module_param_named(vm_fragment_size, amdgpu_vm_fragment_size, int, 0444);

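/*
 * Editor's note (illustrative, not part of the upstream source): the
 * fragment covers 2^bits pages of 4 KiB, which is where the values quoted
 * above come from: 4 bits -> 16 * 4 KiB = 64 KiB, and
 * 9 bits -> 512 * 4 KiB = 2 MiB.
 */
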
/**
 * DOC: vm_block_size (int)
 * Override VM page table size in bits (default depending on vm_size and hw setup). The default is -1 (automatic for each asic).
 */
MODULE_PARM_DESC(vm_block_size, "VM page table size in bits (default depending on vm_size)");
module_param_named(vm_block_size, amdgpu_vm_block_size, int, 0444);

/**
 * DOC: vm_fault_stop (int)
 * Stop on VM fault for debugging (0 = never, 1 = print first, 2 = always). The default is 0 (No stop).
 */
MODULE_PARM_DESC(vm_fault_stop, "Stop on VM fault (0 = never (default), 1 = print first, 2 = always)");
module_param_named(vm_fault_stop, amdgpu_vm_fault_stop, int, 0444);

/**
 * DOC: vm_debug (int)
 * Debug VM handling (0 = disabled, 1 = enabled). The default is 0 (Disabled).
 */
MODULE_PARM_DESC(vm_debug, "Debug VM handling (0 = disabled (default), 1 = enabled)");
module_param_named(vm_debug, amdgpu_vm_debug, int, 0644);

/**
 * DOC: vm_update_mode (int)
 * Override VM update mode. VM updated by using CPU (0 = never, 1 = Graphics only, 2 = Compute only, 3 = Both). The default
 * is -1 (Only in large BAR(LB) systems Compute VM tables will be updated by CPU, otherwise 0, never).
 */
MODULE_PARM_DESC(vm_update_mode, "VM update using CPU (0 = never (default except for large BAR(LB)), 1 = Graphics only, 2 = Compute only (default for LB), 3 = Both");
module_param_named(vm_update_mode, amdgpu_vm_update_mode, int, 0444);

/**
 * DOC: exp_hw_support (int)
 * Enable experimental hw support (1 = enable). The default is 0 (disabled).
 */
MODULE_PARM_DESC(exp_hw_support, "experimental hw support (1 = enable, 0 = disable (default))");
module_param_named(exp_hw_support, amdgpu_exp_hw_support, int, 0444);

/**
 * DOC: dc (int)
 * Disable/Enable Display Core driver for debugging (1 = enable, 0 = disable). The default is -1 (automatic for each asic).
 */
MODULE_PARM_DESC(dc, "Display Core driver (1 = enable, 0 = disable, -1 = auto (default))");
module_param_named(dc, amdgpu_dc, int, 0444);

/**
 * DOC: sched_jobs (int)
 * Override the max number of jobs supported in the sw queue. The default is 32.
 */
MODULE_PARM_DESC(sched_jobs, "the max number of jobs supported in the sw queue (default 32)");
module_param_named(sched_jobs, amdgpu_sched_jobs, int, 0444);

/**
 * DOC: sched_hw_submission (int)
 * Override the max number of HW submissions. The default is 2.
 */
MODULE_PARM_DESC(sched_hw_submission, "the max number of HW submissions (default 2)");
module_param_named(sched_hw_submission, amdgpu_sched_hw_submission, int, 0444);

/**
 * DOC: ppfeaturemask (uint)
 * Override power features enabled. See enum PP_FEATURE_MASK in drivers/gpu/drm/amd/include/amd_shared.h.
 * The default is the current set of stable power features.
 */
MODULE_PARM_DESC(ppfeaturemask, "all power features enabled (default))");
module_param_named(ppfeaturemask, amdgpu_pp_feature_mask, uint, 0444);

/**
 * DOC: forcelongtraining (uint)
 * Force long memory training in resume.
 * The default is zero, which indicates short training in resume.
 */
MODULE_PARM_DESC(forcelongtraining, "force memory long training");
module_param_named(forcelongtraining, amdgpu_force_long_training, uint, 0444);

/**
 * DOC: pcie_gen_cap (uint)
 * Override PCIE gen speed capabilities. See the CAIL flags in drivers/gpu/drm/amd/include/amd_pcie.h.
 * The default is 0 (automatic for each asic).
 */
MODULE_PARM_DESC(pcie_gen_cap, "PCIE Gen Caps (0: autodetect (default))");
module_param_named(pcie_gen_cap, amdgpu_pcie_gen_cap, uint, 0444);

/**
 * DOC: pcie_lane_cap (uint)
 * Override PCIE lanes capabilities. See the CAIL flags in drivers/gpu/drm/amd/include/amd_pcie.h.
 * The default is 0 (automatic for each asic).
 */
MODULE_PARM_DESC(pcie_lane_cap, "PCIE Lane Caps (0: autodetect (default))");
module_param_named(pcie_lane_cap, amdgpu_pcie_lane_cap, uint, 0444);

/**
 * DOC: cg_mask (uint)
 * Override Clockgating features enabled on GPU (0 = disable clock gating). See the AMD_CG_SUPPORT flags in
 * drivers/gpu/drm/amd/include/amd_shared.h. The default is 0xffffffff (all enabled).
 */
MODULE_PARM_DESC(cg_mask, "Clockgating flags mask (0 = disable clock gating)");
module_param_named(cg_mask, amdgpu_cg_mask, uint, 0444);

/**
 * DOC: pg_mask (uint)
 * Override Powergating features enabled on GPU (0 = disable power gating). See the AMD_PG_SUPPORT flags in
 * drivers/gpu/drm/amd/include/amd_shared.h. The default is 0xffffffff (all enabled).
 */
MODULE_PARM_DESC(pg_mask, "Powergating flags mask (0 = disable power gating)");
module_param_named(pg_mask, amdgpu_pg_mask, uint, 0444);

/**
 * DOC: sdma_phase_quantum (uint)
 * Override SDMA context switch phase quantum (x 1K GPU clock cycles, 0 = no change). The default is 32.
 */
MODULE_PARM_DESC(sdma_phase_quantum, "SDMA context switch phase quantum (x 1K GPU clock cycles, 0 = no change (default 32))");
module_param_named(sdma_phase_quantum, amdgpu_sdma_phase_quantum, uint, 0444);

/**
 * DOC: disable_cu (charp)
 * Set to disable CUs (specified as a list like se.sh.cu,...). The default is NULL.
 */
MODULE_PARM_DESC(disable_cu, "Disable CUs (se.sh.cu,...)");
module_param_named(disable_cu, amdgpu_disable_cu, charp, 0444);

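/*
 * Illustrative example (editor's note, not part of the upstream source):
 * each entry is an se.sh.cu triple, so a hypothetical
 *
 *   amdgpu.disable_cu=0.0.4,0.0.5
 *
 * would disable CUs 4 and 5 in shader engine 0, shader array 0.
 */
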
/**
 * DOC: virtual_display (charp)
 * Set to enable virtual display feature. This feature provides a virtual display hardware on headless boards
 * or in virtualized environments. It will be set like xxxx:xx:xx.x,x;xxxx:xx:xx.x,x. It's the pci address of
 * the device, plus the number of crtcs to expose. E.g., 0000:26:00.0,4 would enable 4 virtual crtcs on the pci
 * device at 26:00.0. The default is NULL.
 */
MODULE_PARM_DESC(virtual_display,
		 "Enable virtual display feature (the virtual_display will be set like xxxx:xx:xx.x,x;xxxx:xx:xx.x,x)");
module_param_named(virtual_display, amdgpu_virtual_display, charp, 0444);

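/*
 * Illustrative example (editor's note, not part of the upstream source):
 * entries are semicolon separated, so a hypothetical
 *
 *   amdgpu.virtual_display=0000:26:00.0,4;0000:43:00.0,2
 *
 * would expose four virtual crtcs on the first device and two on the
 * second.
 */
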
/**
 * DOC: job_hang_limit (int)
 * Set how much time to allow a job to hang without dropping it. The default is 0.
 */
MODULE_PARM_DESC(job_hang_limit, "how much time allow a job hang and not drop it (default 0)");
module_param_named(job_hang_limit, amdgpu_job_hang_limit, int, 0444);

/**
 * DOC: lbpw (int)
 * Override Load Balancing Per Watt (LBPW) support (1 = enable, 0 = disable). The default is -1 (auto, enabled).
 */
MODULE_PARM_DESC(lbpw, "Load Balancing Per Watt (LBPW) support (1 = enable, 0 = disable, -1 = auto)");
module_param_named(lbpw, amdgpu_lbpw, int, 0444);

MODULE_PARM_DESC(compute_multipipe, "Force compute queues to be spread across pipes (1 = enable, 0 = disable, -1 = auto)");
module_param_named(compute_multipipe, amdgpu_compute_multipipe, int, 0444);

/**
 * DOC: gpu_recovery (int)
 * Set to enable GPU recovery mechanism (1 = enable, 0 = disable). The default is -1 (auto, disabled except SRIOV).
 */
MODULE_PARM_DESC(gpu_recovery, "Enable GPU recovery mechanism, (1 = enable, 0 = disable, -1 = auto)");
module_param_named(gpu_recovery, amdgpu_gpu_recovery, int, 0444);

/**
 * DOC: emu_mode (int)
 * Set value 1 to enable emulation mode. This is only needed when running on an emulator. The default is 0 (disabled).
 */
MODULE_PARM_DESC(emu_mode, "Emulation mode, (1 = enable, 0 = disable)");
module_param_named(emu_mode, amdgpu_emu_mode, int, 0444);

/**
 * DOC: ras_enable (int)
 * Enable RAS features on the GPU (0 = disable, 1 = enable, -1 = auto (default))
 */
MODULE_PARM_DESC(ras_enable, "Enable RAS features on the GPU (0 = disable, 1 = enable, -1 = auto (default))");
module_param_named(ras_enable, amdgpu_ras_enable, int, 0444);

/**
 * DOC: ras_mask (uint)
 * Mask of RAS features to enable (default 0xffffffff), only valid when ras_enable == 1.
 * See the flags in drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h
 */
MODULE_PARM_DESC(ras_mask, "Mask of RAS features to enable (default 0xffffffff), only valid when ras_enable == 1");
module_param_named(ras_mask, amdgpu_ras_mask, uint, 0444);

/**
 * DOC: si_support (int)
 * Set SI support driver. This parameter only takes effect when CONFIG_DRM_AMDGPU_SI is set. For SI asics, when the
 * radeon driver is also enabled, set the value to 0 to use the radeon driver or to 1 to use the amdgpu driver. The
 * default is to use the radeon driver when it is available, otherwise the amdgpu driver.
 */
#ifdef CONFIG_DRM_AMDGPU_SI

#if defined(CONFIG_DRM_RADEON) || defined(CONFIG_DRM_RADEON_MODULE)
int amdgpu_si_support = 0;
MODULE_PARM_DESC(si_support, "SI support (1 = enabled, 0 = disabled (default))");
#else
int amdgpu_si_support = 1;
MODULE_PARM_DESC(si_support, "SI support (1 = enabled (default), 0 = disabled)");
#endif

module_param_named(si_support, amdgpu_si_support, int, 0444);
#endif

/**
 * DOC: cik_support (int)
 * Set CIK support driver. This parameter only takes effect when CONFIG_DRM_AMDGPU_CIK is set. For CIK asics, when the
 * radeon driver is also enabled, set the value to 0 to use the radeon driver or to 1 to use the amdgpu driver. The
 * default is to use the radeon driver when it is available, otherwise the amdgpu driver.
 */
#ifdef CONFIG_DRM_AMDGPU_CIK

#if defined(CONFIG_DRM_RADEON) || defined(CONFIG_DRM_RADEON_MODULE)
int amdgpu_cik_support = 0;
MODULE_PARM_DESC(cik_support, "CIK support (1 = enabled, 0 = disabled (default))");
#else
int amdgpu_cik_support = 1;
MODULE_PARM_DESC(cik_support, "CIK support (1 = enabled (default), 0 = disabled)");
#endif

module_param_named(cik_support, amdgpu_cik_support, int, 0444);
#endif

/**
 * DOC: smu_memory_pool_size (uint)
 * It is used to reserve GTT for SMU debug usage; set the value to 0 to disable it. The actual size is value * 256MiB.
 * E.g. 0x1 = 256Mbyte, 0x2 = 512Mbyte, 0x4 = 1 Gbyte, 0x8 = 2GByte. The default is 0 (disabled).
 */
MODULE_PARM_DESC(smu_memory_pool_size,
		"reserve gtt for smu debug usage, 0 = disable,"
		"0x1 = 256Mbyte, 0x2 = 512Mbyte, 0x4 = 1 Gbyte, 0x8 = 2GByte");
module_param_named(smu_memory_pool_size, amdgpu_smu_memory_pool_size, uint, 0444);

/**
 * DOC: async_gfx_ring (int)
 * It is used to enable gfx rings that could be configured with different priorities or equal priorities
 */
MODULE_PARM_DESC(async_gfx_ring,
	"Asynchronous GFX rings that could be configured with either different priorities (HP3D ring and LP3D ring), or equal priorities (0 = disabled, 1 = enabled (default))");
module_param_named(async_gfx_ring, amdgpu_async_gfx_ring, int, 0444);

/**
 * DOC: mcbp (int)
 * It is used to enable mid command buffer preemption. (0 = disabled (default), 1 = enabled)
 */
MODULE_PARM_DESC(mcbp,
	"Enable Mid-command buffer preemption (0 = disabled (default), 1 = enabled)");
module_param_named(mcbp, amdgpu_mcbp, int, 0444);

/**
 * DOC: discovery (int)
 * Allow driver to discover hardware IP information from IP Discovery table at the top of VRAM.
 * (-1 = auto (default), 0 = disabled, 1 = enabled)
 */
MODULE_PARM_DESC(discovery,
	"Allow driver to discover hardware IPs from IP Discovery table at the top of VRAM");
module_param_named(discovery, amdgpu_discovery, int, 0444);

/**
 * DOC: mes (int)
 * Enable Micro Engine Scheduler. This is a new hw scheduling engine for gfx, sdma, and compute.
 * (0 = disabled (default), 1 = enabled)
 */
MODULE_PARM_DESC(mes,
	"Enable Micro Engine Scheduler (0 = disabled (default), 1 = enabled)");
module_param_named(mes, amdgpu_mes, int, 0444);

MODULE_PARM_DESC(noretry,
	"Disable retry faults (0 = retry enabled (default), 1 = retry disabled)");
module_param_named(noretry, amdgpu_noretry, int, 0644);

/**
 * DOC: force_asic_type (int)
 * A non-negative value used to specify the asic type for all supported GPUs.
 */
MODULE_PARM_DESC(force_asic_type,
	"A non negative value used to specify the asic type for all supported GPUs");
module_param_named(force_asic_type, amdgpu_force_asic_type, int, 0444);



#ifdef CONFIG_HSA_AMD
/**
 * DOC: sched_policy (int)
 * Set scheduling policy. Default is HWS (hardware scheduling) with over-subscription.
 * Setting 1 disables over-subscription. Setting 2 disables HWS and statically
 * assigns queues to HQDs.
 */
int sched_policy = KFD_SCHED_POLICY_HWS;
module_param(sched_policy, int, 0444);
MODULE_PARM_DESC(sched_policy,
	"Scheduling policy (0 = HWS (Default), 1 = HWS without over-subscription, 2 = Non-HWS (Used for debugging only)");

/**
 * DOC: hws_max_conc_proc (int)
 * Maximum number of processes that HWS can schedule concurrently. The maximum is the
 * number of VMIDs assigned to the HWS, which is also the default.
 */
int hws_max_conc_proc = 8;
module_param(hws_max_conc_proc, int, 0444);
MODULE_PARM_DESC(hws_max_conc_proc,
	"Max # processes HWS can execute concurrently when sched_policy=0 (0 = no concurrency, #VMIDs for KFD = Maximum(default))");

/**
 * DOC: cwsr_enable (int)
 * CWSR (compute wave store and resume) allows the GPU to preempt shader execution in
 * the middle of a compute wave. Default is 1 to enable this feature. Setting 0
 * disables it.
 */
int cwsr_enable = 1;
module_param(cwsr_enable, int, 0444);
MODULE_PARM_DESC(cwsr_enable, "CWSR enable (0 = Off, 1 = On (Default))");

/**
 * DOC: max_num_of_queues_per_device (int)
 * Maximum number of queues per device. Valid setting is between 1 and 4096. Default
 * is 4096.
 */
int max_num_of_queues_per_device = KFD_MAX_NUM_OF_QUEUES_PER_DEVICE_DEFAULT;
module_param(max_num_of_queues_per_device, int, 0444);
MODULE_PARM_DESC(max_num_of_queues_per_device,
	"Maximum number of supported queues per device (1 = Minimum, 4096 = default)");

/**
 * DOC: send_sigterm (int)
 * Send sigterm to HSA process on unhandled exceptions. Default is not to send sigterm
 * but just print errors on dmesg. Setting 1 enables sending sigterm.
 */
int send_sigterm;
module_param(send_sigterm, int, 0444);
MODULE_PARM_DESC(send_sigterm,
	"Send sigterm to HSA process on unhandled exception (0 = disable, 1 = enable)");

/**
 * DOC: debug_largebar (int)
 * Set debug_largebar as 1 to enable simulating large-bar capability on non-large bar
 * systems. This limits the VRAM size reported to ROCm applications to the visible
 * size, usually 256MB.
 * Default value is 0 (disabled).
 */
int debug_largebar;
module_param(debug_largebar, int, 0444);
MODULE_PARM_DESC(debug_largebar,
	"Debug large-bar flag used to simulate large-bar capability on non-large bar machine (0 = disable, 1 = enable)");

/**
 * DOC: ignore_crat (int)
 * Ignore CRAT table during KFD initialization. By default, KFD uses the ACPI CRAT
 * table to get information about AMD APUs. This option can serve as a workaround on
 * systems with a broken CRAT table.
 */
int ignore_crat;
module_param(ignore_crat, int, 0444);
MODULE_PARM_DESC(ignore_crat,
	"Ignore CRAT table during KFD initialization (0 = use CRAT (default), 1 = ignore CRAT)");

/**
 * DOC: halt_if_hws_hang (int)
 * Halt if HWS hang is detected. Default value, 0, disables the halt on hang.
 * Setting 1 enables halt on hang.
 */
int halt_if_hws_hang;
module_param(halt_if_hws_hang, int, 0644);
MODULE_PARM_DESC(halt_if_hws_hang, "Halt if HWS hang is detected (0 = off (default), 1 = on)");

/**
 * DOC: hws_gws_support (bool)
 * Assume that HWS supports GWS barriers regardless of what firmware version
 * check says. Default value: false (rely on MEC2 firmware version check).
 */
bool hws_gws_support;
module_param(hws_gws_support, bool, 0444);
MODULE_PARM_DESC(hws_gws_support, "Assume MEC2 FW supports GWS barriers (false = rely on FW version check (Default), true = force supported)");

/**
 * DOC: queue_preemption_timeout_ms (int)
 * queue preemption timeout in ms (1 = Minimum, 9000 = default)
 */
int queue_preemption_timeout_ms = 9000;
module_param(queue_preemption_timeout_ms, int, 0644);
MODULE_PARM_DESC(queue_preemption_timeout_ms, "queue preemption timeout in ms (1 = Minimum, 9000 = default)");

/**
 * DOC: debug_evictions (bool)
 * Enable extra debug messages to help determine the cause of evictions
 */
bool debug_evictions;
module_param(debug_evictions, bool, 0644);
MODULE_PARM_DESC(debug_evictions, "enable eviction debug messages (false = default)");
#endif

/**
 * DOC: dcfeaturemask (uint)
 * Override display features enabled. See enum DC_FEATURE_MASK in drivers/gpu/drm/amd/include/amd_shared.h.
 * The default is the current set of stable display features.
 */
MODULE_PARM_DESC(dcfeaturemask, "all stable DC features enabled (default))");
module_param_named(dcfeaturemask, amdgpu_dc_feature_mask, uint, 0444);

/**
 * DOC: dcdebugmask (uint)
 * Override display features enabled. See enum DC_DEBUG_MASK in drivers/gpu/drm/amd/include/amd_shared.h.
 */
MODULE_PARM_DESC(dcdebugmask, "all debug options disabled (default))");
module_param_named(dcdebugmask, amdgpu_dc_debug_mask, uint, 0444);

/**
 * DOC: abmlevel (uint)
 * Override the default ABM (Adaptive Backlight Management) level used for DC
 * enabled hardware. Requires DMCU to be supported and loaded.
 * Valid levels are 0-4. A value of 0 indicates that ABM should be disabled by
 * default. Values 1-4 control the maximum allowable brightness reduction via
 * the ABM algorithm, with 1 being the least reduction and 4 being the most
 * reduction.
 *
 * Defaults to 0, or disabled. Userspace can still override this level later
 * after boot.
 */
uint amdgpu_dm_abm_level = 0;
MODULE_PARM_DESC(abmlevel, "ABM level (0 = off (default), 1-4 = backlight reduction level) ");
module_param_named(abmlevel, amdgpu_dm_abm_level, uint, 0444);

/**
 * DOC: tmz (int)
 * Trusted Memory Zone (TMZ) is a method to protect data being written
 * to or read from memory.
 *
 * The default value: 0 (off). TODO: change to auto till it is completed.
 */
MODULE_PARM_DESC(tmz, "Enable TMZ feature (-1 = auto, 0 = off (default), 1 = on)");
module_param_named(tmz, amdgpu_tmz, int, 0444);

/**
 * DOC: reset_method (int)
 * GPU reset method (-1 = auto (default), 0 = legacy, 1 = mode0, 2 = mode1, 3 = mode2, 4 = baco)
 */
MODULE_PARM_DESC(reset_method, "GPU reset method (-1 = auto (default), 0 = legacy, 1 = mode0, 2 = mode1, 3 = mode2, 4 = baco)");
module_param_named(reset_method, amdgpu_reset_method, int, 0444);

static const struct pci_device_id pciidlist[] = {
#ifdef CONFIG_DRM_AMDGPU_SI
	{0x1002, 0x6780, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TAHITI},
	{0x1002, 0x6784, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TAHITI},
	{0x1002, 0x6788, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TAHITI},
	{0x1002, 0x678A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TAHITI},
	{0x1002, 0x6790, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TAHITI},
	{0x1002, 0x6791, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TAHITI},
	{0x1002, 0x6792, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TAHITI},
	{0x1002, 0x6798, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TAHITI},
	{0x1002, 0x6799, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TAHITI},
	{0x1002, 0x679A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TAHITI},
	{0x1002, 0x679B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TAHITI},
	{0x1002, 0x679E, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TAHITI},
	{0x1002, 0x679F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TAHITI},
	{0x1002, 0x6800, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PITCAIRN|AMD_IS_MOBILITY},
	{0x1002, 0x6801, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PITCAIRN|AMD_IS_MOBILITY},
	{0x1002, 0x6802, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PITCAIRN|AMD_IS_MOBILITY},
	{0x1002, 0x6806, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PITCAIRN},
	{0x1002, 0x6808, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PITCAIRN},
	{0x1002, 0x6809, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PITCAIRN},
	{0x1002, 0x6810, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PITCAIRN},
	{0x1002, 0x6811, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PITCAIRN},
	{0x1002, 0x6816, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PITCAIRN},
	{0x1002, 0x6817, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PITCAIRN},
	{0x1002, 0x6818, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PITCAIRN},
	{0x1002, 0x6819, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PITCAIRN},
	{0x1002, 0x6600, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|AMD_IS_MOBILITY},
	{0x1002, 0x6601, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|AMD_IS_MOBILITY},
	{0x1002, 0x6602, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|AMD_IS_MOBILITY},
	{0x1002, 0x6603, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|AMD_IS_MOBILITY},
	{0x1002, 0x6604, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|AMD_IS_MOBILITY},
	{0x1002, 0x6605, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|AMD_IS_MOBILITY},
	{0x1002, 0x6606, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|AMD_IS_MOBILITY},
	{0x1002, 0x6607, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|AMD_IS_MOBILITY},
	{0x1002, 0x6608, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND},
	{0x1002, 0x6610, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND},
	{0x1002, 0x6611, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND},
	{0x1002, 0x6613, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND},
	{0x1002, 0x6617, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|AMD_IS_MOBILITY},
	{0x1002, 0x6620, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|AMD_IS_MOBILITY},
	{0x1002, 0x6621, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|AMD_IS_MOBILITY},
	{0x1002, 0x6623, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|AMD_IS_MOBILITY},
	{0x1002, 0x6631, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND},
	{0x1002, 0x6820, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|AMD_IS_MOBILITY},
	{0x1002, 0x6821, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|AMD_IS_MOBILITY},
	{0x1002, 0x6822, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|AMD_IS_MOBILITY},
	{0x1002, 0x6823, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|AMD_IS_MOBILITY},
	{0x1002, 0x6824, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|AMD_IS_MOBILITY},
	{0x1002, 0x6825, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|AMD_IS_MOBILITY},
	{0x1002, 0x6826, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|AMD_IS_MOBILITY},
	{0x1002, 0x6827, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|AMD_IS_MOBILITY},
	{0x1002, 0x6828, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE},
	{0x1002, 0x6829, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE},
	{0x1002, 0x682A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|AMD_IS_MOBILITY},
	{0x1002, 0x682B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|AMD_IS_MOBILITY},
	{0x1002, 0x682C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE},
	{0x1002, 0x682D, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|AMD_IS_MOBILITY},
	{0x1002, 0x682F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|AMD_IS_MOBILITY},
	{0x1002, 0x6830, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|AMD_IS_MOBILITY},
	{0x1002, 0x6831, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|AMD_IS_MOBILITY},
	{0x1002, 0x6835, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE},
	{0x1002, 0x6837, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE},
	{0x1002, 0x6838, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE},
	{0x1002, 0x6839, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE},
	{0x1002, 0x683B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE},
	{0x1002, 0x683D, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE},
	{0x1002, 0x683F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE},
	{0x1002, 0x6660, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_HAINAN|AMD_IS_MOBILITY},
	{0x1002, 0x6663, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_HAINAN|AMD_IS_MOBILITY},
	{0x1002, 0x6664, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_HAINAN|AMD_IS_MOBILITY},
	{0x1002, 0x6665, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_HAINAN|AMD_IS_MOBILITY},
	{0x1002, 0x6667, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_HAINAN|AMD_IS_MOBILITY},
	{0x1002, 0x666F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_HAINAN|AMD_IS_MOBILITY},
#endif
#ifdef CONFIG_DRM_AMDGPU_CIK
	/* Kaveri */
	{0x1002, 0x1304, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|AMD_IS_MOBILITY|AMD_IS_APU},
	{0x1002, 0x1305, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|AMD_IS_APU},
	{0x1002, 0x1306, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|AMD_IS_MOBILITY|AMD_IS_APU},
	{0x1002, 0x1307, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|AMD_IS_APU},
	{0x1002, 0x1309, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|AMD_IS_MOBILITY|AMD_IS_APU},
	{0x1002, 0x130A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|AMD_IS_MOBILITY|AMD_IS_APU},
	{0x1002, 0x130B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|AMD_IS_MOBILITY|AMD_IS_APU},
	{0x1002, 0x130C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|AMD_IS_MOBILITY|AMD_IS_APU},
	{0x1002, 0x130D, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|AMD_IS_MOBILITY|AMD_IS_APU},
	{0x1002, 0x130E, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|AMD_IS_MOBILITY|AMD_IS_APU},
	{0x1002, 0x130F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|AMD_IS_APU},
	{0x1002, 0x1310, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|AMD_IS_APU},
	{0x1002, 0x1311, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|AMD_IS_APU},
	{0x1002, 0x1312, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|AMD_IS_APU},
	{0x1002, 0x1313, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|AMD_IS_APU},
	{0x1002, 0x1315, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|AMD_IS_APU},
	{0x1002, 0x1316, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|AMD_IS_APU},
	{0x1002, 0x1317, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|AMD_IS_MOBILITY|AMD_IS_APU},
	{0x1002, 0x1318, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|AMD_IS_MOBILITY|AMD_IS_APU},
	{0x1002, 0x131B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|AMD_IS_APU},
	{0x1002, 0x131C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|AMD_IS_APU},
	{0x1002, 0x131D, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|AMD_IS_APU},
	/* Bonaire */
	{0x1002, 0x6640, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BONAIRE|AMD_IS_MOBILITY},
	{0x1002, 0x6641, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BONAIRE|AMD_IS_MOBILITY},
	{0x1002, 0x6646, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BONAIRE|AMD_IS_MOBILITY},
	{0x1002, 0x6647, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BONAIRE|AMD_IS_MOBILITY},
	{0x1002, 0x6649, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BONAIRE},
	{0x1002, 0x6650, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BONAIRE},
	{0x1002, 0x6651, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BONAIRE},
	{0x1002, 0x6658, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BONAIRE},
	{0x1002, 0x665c, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BONAIRE},
	{0x1002, 0x665d, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BONAIRE},
	{0x1002, 0x665f, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BONAIRE},
	/* Hawaii */
	{0x1002, 0x67A0, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_HAWAII},
	{0x1002, 0x67A1, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_HAWAII},
	{0x1002, 0x67A2, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_HAWAII},
	{0x1002, 0x67A8, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_HAWAII},
	{0x1002, 0x67A9, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_HAWAII},
	{0x1002, 0x67AA, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_HAWAII},
	{0x1002, 0x67B0, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_HAWAII},
	{0x1002, 0x67B1, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_HAWAII},
	{0x1002, 0x67B8, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_HAWAII},
	{0x1002, 0x67B9, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_HAWAII},
	{0x1002, 0x67BA, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_HAWAII},
	{0x1002, 0x67BE, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_HAWAII},
	/* Kabini */
	{0x1002, 0x9830, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KABINI|AMD_IS_MOBILITY|AMD_IS_APU},
	{0x1002, 0x9831, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KABINI|AMD_IS_APU},
	{0x1002, 0x9832, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KABINI|AMD_IS_MOBILITY|AMD_IS_APU},
	{0x1002, 0x9833, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KABINI|AMD_IS_APU},
	{0x1002, 0x9834, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KABINI|AMD_IS_MOBILITY|AMD_IS_APU},
	{0x1002, 0x9835, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KABINI|AMD_IS_APU},
	{0x1002, 0x9836, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KABINI|AMD_IS_MOBILITY|AMD_IS_APU},
	{0x1002, 0x9837, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KABINI|AMD_IS_APU},
	{0x1002, 0x9838, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KABINI|AMD_IS_MOBILITY|AMD_IS_APU},
	{0x1002, 0x9839, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KABINI|AMD_IS_MOBILITY|AMD_IS_APU},
	{0x1002, 0x983a, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KABINI|AMD_IS_APU},
	{0x1002, 0x983b, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KABINI|AMD_IS_MOBILITY|AMD_IS_APU},
	{0x1002, 0x983c, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KABINI|AMD_IS_APU},
	{0x1002, 0x983d, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KABINI|AMD_IS_APU},
	{0x1002, 0x983e, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KABINI|AMD_IS_APU},
	{0x1002, 0x983f, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KABINI|AMD_IS_APU},
	/* mullins */
	{0x1002, 0x9850, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|AMD_IS_MOBILITY|AMD_IS_APU},
	{0x1002, 0x9851, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|AMD_IS_MOBILITY|AMD_IS_APU},
	{0x1002, 0x9852, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|AMD_IS_MOBILITY|AMD_IS_APU},
	{0x1002, 0x9853, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|AMD_IS_MOBILITY|AMD_IS_APU},
	{0x1002, 0x9854, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|AMD_IS_MOBILITY|AMD_IS_APU},
	{0x1002, 0x9855, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|AMD_IS_MOBILITY|AMD_IS_APU},
	{0x1002, 0x9856, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|AMD_IS_MOBILITY|AMD_IS_APU},
	{0x1002, 0x9857, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|AMD_IS_MOBILITY|AMD_IS_APU},
	{0x1002, 0x9858, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|AMD_IS_MOBILITY|AMD_IS_APU},
	{0x1002, 0x9859, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|AMD_IS_MOBILITY|AMD_IS_APU},
	{0x1002, 0x985A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|AMD_IS_MOBILITY|AMD_IS_APU},
	{0x1002, 0x985B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|AMD_IS_MOBILITY|AMD_IS_APU},
	{0x1002, 0x985C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|AMD_IS_MOBILITY|AMD_IS_APU},
	{0x1002, 0x985D, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|AMD_IS_MOBILITY|AMD_IS_APU},
	{0x1002, 0x985E, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|AMD_IS_MOBILITY|AMD_IS_APU},
	{0x1002, 0x985F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|AMD_IS_MOBILITY|AMD_IS_APU},
#endif
	/* topaz */
	{0x1002, 0x6900, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TOPAZ},
	{0x1002, 0x6901, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TOPAZ},
	{0x1002, 0x6902, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TOPAZ},
	{0x1002, 0x6903, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TOPAZ},
	{0x1002, 0x6907, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TOPAZ},
	/* tonga */
	{0x1002, 0x6920, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TONGA},
	{0x1002, 0x6921, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TONGA},
	{0x1002, 0x6928, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TONGA},
	{0x1002, 0x6929, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TONGA},
	{0x1002, 0x692B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TONGA},
	{0x1002, 0x692F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TONGA},
	{0x1002, 0x6930, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TONGA},
	{0x1002, 0x6938, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TONGA},
	{0x1002, 0x6939, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TONGA},
	/* fiji */
	{0x1002, 0x7300, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_FIJI},
	{0x1002, 0x730F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_FIJI},
	/* carrizo */
	{0x1002, 0x9870, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CARRIZO|AMD_IS_APU},
	{0x1002, 0x9874, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CARRIZO|AMD_IS_APU},
	{0x1002, 0x9875, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CARRIZO|AMD_IS_APU},
	{0x1002, 0x9876, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CARRIZO|AMD_IS_APU},
	{0x1002, 0x9877, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CARRIZO|AMD_IS_APU},
	/* stoney */
	{0x1002, 0x98E4, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_STONEY|AMD_IS_APU},
	/* Polaris11 */
	{0x1002, 0x67E0, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS11},
	{0x1002, 0x67E3, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS11},
	{0x1002, 0x67E8, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS11},
	{0x1002, 0x67EB, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS11},
	{0x1002, 0x67EF, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS11},
	{0x1002, 0x67FF, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS11},
	{0x1002, 0x67E1, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS11},
	{0x1002, 0x67E7, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS11},
	{0x1002, 0x67E9, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS11},
	/* Polaris10 */
	{0x1002, 0x67C0, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS10},
	{0x1002, 0x67C1, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS10},
	{0x1002, 0x67C2, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS10},
	{0x1002, 0x67C4, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS10},
	{0x1002, 0x67C7, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS10},
	{0x1002, 0x67D0, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS10},
	{0x1002, 0x67DF, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS10},
	{0x1002, 0x67C8, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS10},
	{0x1002, 0x67C9, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS10},
	{0x1002, 0x67CA, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS10},
	{0x1002, 0x67CC, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS10},
	{0x1002, 0x67CF, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS10},
	{0x1002, 0x6FDF, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS10},
	/* Polaris12 */
	{0x1002, 0x6980, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS12},
	{0x1002, 0x6981, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS12},
	{0x1002, 0x6985, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS12},
	{0x1002, 0x6986, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS12},
	{0x1002, 0x6987, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS12},
	{0x1002, 0x6995, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS12},
	{0x1002, 0x6997, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS12},
	{0x1002, 0x699F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS12},
	/* VEGAM */
	{0x1002, 0x694C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGAM},
	{0x1002, 0x694E, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGAM},
	{0x1002, 0x694F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGAM},
	/* Vega 10 */
	{0x1002, 0x6860, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA10},
	{0x1002, 0x6861, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA10},
	{0x1002, 0x6862, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA10},
	{0x1002, 0x6863, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA10},
	{0x1002, 0x6864, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA10},
	{0x1002, 0x6867, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA10},
	{0x1002, 0x6868, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA10},
	{0x1002, 0x6869, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA10},
	{0x1002, 0x686a, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA10},
	{0x1002, 0x686b, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA10},
	{0x1002, 0x686c, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA10},
	{0x1002, 0x686d, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA10},
	{0x1002, 0x686e, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA10},
	{0x1002, 0x686f, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA10},
	{0x1002, 0x687f, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA10},
	/* Vega 12 */
	{0x1002, 0x69A0, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA12},
	{0x1002, 0x69A1, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA12},
	{0x1002, 0x69A2, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA12},
	{0x1002, 0x69A3, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA12},
	{0x1002, 0x69AF, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA12},
	/* Vega 20 */
	{0x1002, 0x66A0, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA20},
	{0x1002, 0x66A1, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA20},
	{0x1002, 0x66A2, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA20},
	{0x1002, 0x66A3, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA20},
	{0x1002, 0x66A4, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA20},
	{0x1002, 0x66A7, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA20},
	{0x1002, 0x66AF, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA20},
	/* Raven */
	{0x1002, 0x15dd, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RAVEN|AMD_IS_APU},
	{0x1002, 0x15d8, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RAVEN|AMD_IS_APU},
	/* Arcturus */
	{0x1002, 0x738C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARCTURUS|AMD_EXP_HW_SUPPORT},
	{0x1002, 0x7388, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARCTURUS|AMD_EXP_HW_SUPPORT},
	{0x1002, 0x738E, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARCTURUS|AMD_EXP_HW_SUPPORT},
	{0x1002, 0x7390, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARCTURUS|AMD_EXP_HW_SUPPORT},
	/* Navi10 */
	{0x1002, 0x7310, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_NAVI10},
	{0x1002, 0x7312, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_NAVI10},
	{0x1002, 0x7318, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_NAVI10},
	{0x1002, 0x7319, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_NAVI10},
	{0x1002, 0x731A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_NAVI10},
	{0x1002, 0x731B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_NAVI10},
	{0x1002, 0x731F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_NAVI10},
	/* Navi14 */
	{0x1002, 0x7340, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_NAVI14},
	{0x1002, 0x7341, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_NAVI14},
	{0x1002, 0x7347, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_NAVI14},
	{0x1002, 0x734F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_NAVI14},

	/* Renoir */
	{0x1002, 0x1636, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RENOIR|AMD_IS_APU},

	/* Navi12 */
	{0x1002, 0x7360, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_NAVI12},
	{0x1002, 0x7362, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_NAVI12},

	/* Sienna_Cichlid */
	{0x1002, 0x73A0, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SIENNA_CICHLID},
	{0x1002, 0x73A2, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SIENNA_CICHLID},
	{0x1002, 0x73A3, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SIENNA_CICHLID},
	{0x1002, 0x73AB, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SIENNA_CICHLID},
	{0x1002, 0x73AE, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SIENNA_CICHLID},
	{0x1002, 0x73BF, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SIENNA_CICHLID},

	{0, 0, 0}
};

MODULE_DEVICE_TABLE(pci, pciidlist);

static struct drm_driver kms_driver;

static int amdgpu_pci_probe(struct pci_dev *pdev,
			    const struct pci_device_id *ent)
{
	struct drm_device *dev;
	struct amdgpu_device *adev;
	unsigned long flags = ent->driver_data;
	int ret, retry = 0;
	bool supports_atomic = false;

	if (!amdgpu_virtual_display &&
	    amdgpu_device_asic_has_dc_support(flags & AMD_ASIC_MASK))
		supports_atomic = true;

	if ((flags & AMD_EXP_HW_SUPPORT) && !amdgpu_exp_hw_support) {
		DRM_INFO("This hardware requires experimental hardware support.\n"
			 "See modparam exp_hw_support\n");
		return -ENODEV;
	}

#ifdef CONFIG_DRM_AMDGPU_SI
	if (!amdgpu_si_support) {
		switch (flags & AMD_ASIC_MASK) {
		case CHIP_TAHITI:
		case CHIP_PITCAIRN:
		case CHIP_VERDE:
		case CHIP_OLAND:
		case CHIP_HAINAN:
			dev_info(&pdev->dev,
				 "SI support provided by radeon.\n");
			dev_info(&pdev->dev,
				 "Use radeon.si_support=0 amdgpu.si_support=1 to override.\n"
				);
			return -ENODEV;
		}
	}
#endif
#ifdef CONFIG_DRM_AMDGPU_CIK
	if (!amdgpu_cik_support) {
		switch (flags & AMD_ASIC_MASK) {
		case CHIP_KAVERI:
		case CHIP_BONAIRE:
		case CHIP_HAWAII:
		case CHIP_KABINI:
		case CHIP_MULLINS:
			dev_info(&pdev->dev,
				 "CIK support provided by radeon.\n");
			dev_info(&pdev->dev,
				 "Use radeon.cik_support=0 amdgpu.cik_support=1 to override.\n"
				);
			return -ENODEV;
		}
	}
#endif

	/* Get rid of things like offb */
	ret = drm_fb_helper_remove_conflicting_pci_framebuffers(pdev, "amdgpudrmfb");
	if (ret)
		return ret;

	dev = drm_dev_alloc(&kms_driver, &pdev->dev);
	if (IS_ERR(dev))
		return PTR_ERR(dev);

	if (!supports_atomic)
		dev->driver_features &= ~DRIVER_ATOMIC;

	ret = pci_enable_device(pdev);
	if (ret)
		goto err_free;

	dev->pdev = pdev;

	pci_set_drvdata(pdev, dev);

	ret = amdgpu_driver_load_kms(dev, ent->driver_data);
	if (ret)
		goto err_pci;

retry_init:
	ret = drm_dev_register(dev, ent->driver_data);
	if (ret == -EAGAIN && ++retry <= 3) {
		DRM_INFO("retry init %d\n", retry);
		/* Don't request EX mode too frequently, which can be treated as an attack */
		msleep(5000);
		goto retry_init;
	} else if (ret)
		goto err_pci;

	adev = dev->dev_private;
	ret = amdgpu_debugfs_init(adev);
	if (ret)
		DRM_ERROR("Creating debugfs files failed (%d).\n", ret);

	return 0;

err_pci:
	pci_disable_device(pdev);
err_free:
	drm_dev_put(dev);
	return ret;
}

static void
amdgpu_pci_remove(struct pci_dev *pdev)
{
	struct drm_device *dev = pci_get_drvdata(pdev);

#ifdef MODULE
	if (THIS_MODULE->state != MODULE_STATE_GOING)
#endif
		DRM_ERROR("Hotplug removal is not supported\n");
	drm_dev_unplug(dev);
	amdgpu_driver_unload_kms(dev);
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
	drm_dev_put(dev);
}

static void
amdgpu_pci_shutdown(struct pci_dev *pdev)
{
	struct drm_device *dev = pci_get_drvdata(pdev);
	struct amdgpu_device *adev = dev->dev_private;

	if (amdgpu_ras_intr_triggered())
		return;

	/* if we are running in a VM, make sure the device
	 * is torn down properly on reboot/shutdown.
	 * unfortunately we can't detect certain
	 * hypervisors, so just do this all the time.
	 */
	if (!amdgpu_passthrough(adev))
		adev->mp1_state = PP_MP1_STATE_UNLOAD;
	amdgpu_device_ip_suspend(adev);
	adev->mp1_state = PP_MP1_STATE_NONE;
}

1203static int amdgpu_pmops_suspend(struct device *dev)
1204{
1205 struct drm_device *drm_dev = dev_get_drvdata(dev);
1206
1207 return amdgpu_device_suspend(drm_dev, true);
1208}
1209
1210static int amdgpu_pmops_resume(struct device *dev)
1211{
1212 struct drm_device *drm_dev = dev_get_drvdata(dev);
1213
1214 return amdgpu_device_resume(drm_dev, true);
1215}
1216
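/*
 * Hibernation freeze: suspend with in_hibernate set so the rest of the
 * driver can tell this apart from a normal suspend, then reset the ASIC
 * (presumably to leave the GPU in a known clean state for the hibernation
 * image).
 */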
1217static int amdgpu_pmops_freeze(struct device *dev)
1218{
1219 struct drm_device *drm_dev = dev_get_drvdata(dev);
1220 struct amdgpu_device *adev = drm_dev->dev_private;
1221 int r;
1222
1223 adev->in_hibernate = true;
1224 r = amdgpu_device_suspend(drm_dev, true);
1225 adev->in_hibernate = false;
1226 if (r)
1227 return r;
1228 return amdgpu_asic_reset(adev);
1229}
1230
1231static int amdgpu_pmops_thaw(struct device *dev)
1232{
1233 struct drm_device *drm_dev = dev_get_drvdata(dev);
1234
1235 return amdgpu_device_resume(drm_dev, true);
1236}
1237
1238static int amdgpu_pmops_poweroff(struct device *dev)
1239{
1240 struct drm_device *drm_dev = dev_get_drvdata(dev);
1241
1242 return amdgpu_device_suspend(drm_dev, true);
1243}
1244
1245static int amdgpu_pmops_restore(struct device *dev)
1246{
1247 struct drm_device *drm_dev = dev_get_drvdata(dev);
1248
1249 return amdgpu_device_resume(drm_dev, true);
1250}
1251
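/*
 * amdgpu_pmops_runtime_suspend - runtime PM suspend callback
 *
 * Refuses to suspend (and forbids further runtime PM) when runpm is
 * disabled, waits for all rings to drain, then suspends the device.
 * For BOCO/ATPX platforms the PCI state is handled here (D3cold unless
 * ATPX hybrid); BACO-capable parts enter BACO instead.
 */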
1252static int amdgpu_pmops_runtime_suspend(struct device *dev)
1253{
1254 struct pci_dev *pdev = to_pci_dev(dev);
1255 struct drm_device *drm_dev = pci_get_drvdata(pdev);
1256 struct amdgpu_device *adev = drm_dev->dev_private;
1257 int ret, i;
1258
1259 if (!adev->runpm) {
1260 pm_runtime_forbid(dev);
1261 return -EBUSY;
1262 }
1263
1264 /* wait for all rings to drain before suspending */
1265 for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
1266 struct amdgpu_ring *ring = adev->rings[i];
1267 if (ring && ring->sched.ready) {
1268 ret = amdgpu_fence_wait_empty(ring);
1269 if (ret)
1270 return -EBUSY;
1271 }
1272 }
1273
1274 adev->in_runpm = true;
1275 if (amdgpu_device_supports_boco(drm_dev))
1276 drm_dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
1277 drm_kms_helper_poll_disable(drm_dev);
1278
1279 ret = amdgpu_device_suspend(drm_dev, false);
1280 if (ret)
1281 return ret;
1282
1283 if (amdgpu_device_supports_boco(drm_dev)) {
1284 /* Only need to handle PCI state in the driver for ATPX
1285 * PCI core handles it for _PR3.
1286 */
1287 if (amdgpu_is_atpx_hybrid()) {
1288 pci_ignore_hotplug(pdev);
1289 } else {
1290 pci_save_state(pdev);
1291 pci_disable_device(pdev);
1292 pci_ignore_hotplug(pdev);
1293 pci_set_power_state(pdev, PCI_D3cold);
1294 }
1295 drm_dev->switch_power_state = DRM_SWITCH_POWER_DYNAMIC_OFF;
1296 } else if (amdgpu_device_supports_baco(drm_dev)) {
1297 amdgpu_device_baco_enter(drm_dev);
1298 }
1299
1300 return 0;
1301}
1302
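/*
 * amdgpu_pmops_runtime_resume - runtime PM resume callback
 *
 * Mirror of runtime suspend: brings the PCI device back to D0 (ATPX hybrid
 * only needs bus mastering re-enabled, BACO parts just exit BACO), resumes
 * the device and turns connector polling back on.
 */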
1303static int amdgpu_pmops_runtime_resume(struct device *dev)
1304{
1305 struct pci_dev *pdev = to_pci_dev(dev);
1306 struct drm_device *drm_dev = pci_get_drvdata(pdev);
1307 struct amdgpu_device *adev = drm_dev->dev_private;
1308 int ret;
1309
1310 if (!adev->runpm)
1311 return -EINVAL;
1312
1313 if (amdgpu_device_supports_boco(drm_dev)) {
1314 drm_dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
1315
1316 /* Only need to handle PCI state in the driver for ATPX
1317 * PCI core handles it for _PR3.
1318 */
1319 if (amdgpu_is_atpx_hybrid()) {
1320 pci_set_master(pdev);
1321 } else {
1322 pci_set_power_state(pdev, PCI_D0);
1323 pci_restore_state(pdev);
1324 ret = pci_enable_device(pdev);
1325 if (ret)
1326 return ret;
1327 pci_set_master(pdev);
1328 }
1329 } else if (amdgpu_device_supports_baco(drm_dev)) {
1330 amdgpu_device_baco_exit(drm_dev);
1331 }
1332 ret = amdgpu_device_resume(drm_dev, false);
1333 drm_kms_helper_poll_enable(drm_dev);
1334 if (amdgpu_device_supports_boco(drm_dev))
1335 drm_dev->switch_power_state = DRM_SWITCH_POWER_ON;
1336 adev->in_runpm = false;
1337 return 0;
1338}
1339
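/*
 * amdgpu_pmops_runtime_idle - runtime PM idle callback
 *
 * Returns -EBUSY while any CRTC is active (DC) or any connector is DPMS on
 * (non-DC); otherwise re-arms the autosuspend timer and returns 1 so the
 * core does not suspend the device synchronously.
 */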
1340static int amdgpu_pmops_runtime_idle(struct device *dev)
1341{
1342 struct drm_device *drm_dev = dev_get_drvdata(dev);
1343 struct amdgpu_device *adev = drm_dev->dev_private;
1344 /* we don't want the main rpm_idle to call suspend - we want to autosuspend */
1345 int ret = 1;
1346
1347 if (!adev->runpm) {
1348 pm_runtime_forbid(dev);
1349 return -EBUSY;
1350 }
1351
1352 if (amdgpu_device_has_dc_support(adev)) {
1353 struct drm_crtc *crtc;
1354
1355 drm_modeset_lock_all(drm_dev);
1356
1357 drm_for_each_crtc(crtc, drm_dev) {
1358 if (crtc->state->active) {
1359 ret = -EBUSY;
1360 break;
1361 }
1362 }
1363
1364 drm_modeset_unlock_all(drm_dev);
1365
1366 } else {
1367 struct drm_connector *list_connector;
1368 struct drm_connector_list_iter iter;
1369
1370 mutex_lock(&drm_dev->mode_config.mutex);
1371 drm_modeset_lock(&drm_dev->mode_config.connection_mutex, NULL);
1372
1373 drm_connector_list_iter_begin(drm_dev, &iter);
1374 drm_for_each_connector_iter(list_connector, &iter) {
1375 if (list_connector->dpms == DRM_MODE_DPMS_ON) {
1376 ret = -EBUSY;
1377 break;
1378 }
1379 }
1380
1381 drm_connector_list_iter_end(&iter);
1382
1383 drm_modeset_unlock(&drm_dev->mode_config.connection_mutex);
1384 mutex_unlock(&drm_dev->mode_config.mutex);
1385 }
1386
1387 if (ret == -EBUSY)
1388		DRM_DEBUG_DRIVER("failing to power off - display still active\n");
1389
1390 pm_runtime_mark_last_busy(dev);
1391 pm_runtime_autosuspend(dev);
1392 return ret;
1393}
1394
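/*
 * amdgpu_drm_ioctl - ioctl entry point
 *
 * Wraps drm_ioctl() in a runtime PM get/put so the GPU is awake for the
 * duration of the call and allowed to autosuspend afterwards.
 */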
1395long amdgpu_drm_ioctl(struct file *filp,
1396 unsigned int cmd, unsigned long arg)
1397{
1398 struct drm_file *file_priv = filp->private_data;
1399 struct drm_device *dev;
1400 long ret;
1401 dev = file_priv->minor->dev;
1402 ret = pm_runtime_get_sync(dev->dev);
1403 if (ret < 0)
1404 goto out;
1405
1406 ret = drm_ioctl(filp, cmd, arg);
1407
1408 pm_runtime_mark_last_busy(dev->dev);
1409out:
1410 pm_runtime_put_autosuspend(dev->dev);
1411 return ret;
1412}
1413
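/*
 * Dispatch table for both system sleep and runtime PM; hooked up below via
 * amdgpu_kms_pci_driver.driver.pm.
 */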
1414static const struct dev_pm_ops amdgpu_pm_ops = {
1415 .suspend = amdgpu_pmops_suspend,
1416 .resume = amdgpu_pmops_resume,
1417 .freeze = amdgpu_pmops_freeze,
1418 .thaw = amdgpu_pmops_thaw,
1419 .poweroff = amdgpu_pmops_poweroff,
1420 .restore = amdgpu_pmops_restore,
1421 .runtime_suspend = amdgpu_pmops_runtime_suspend,
1422 .runtime_resume = amdgpu_pmops_runtime_resume,
1423 .runtime_idle = amdgpu_pmops_runtime_idle,
1424};
1425
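/*
 * amdgpu_flush - file flush callback, invoked when the fd is closed
 *
 * Waits (bounded by MAX_WAIT_SCHED_ENTITY_Q_EMPTY) for this client's
 * context entities and VM to go idle before the fd is released.
 */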
1426static int amdgpu_flush(struct file *f, fl_owner_t id)
1427{
1428 struct drm_file *file_priv = f->private_data;
1429 struct amdgpu_fpriv *fpriv = file_priv->driver_priv;
1430 long timeout = MAX_WAIT_SCHED_ENTITY_Q_EMPTY;
1431
1432 timeout = amdgpu_ctx_mgr_entity_flush(&fpriv->ctx_mgr, timeout);
1433 timeout = amdgpu_vm_wait_idle(&fpriv->vm, timeout);
1434
1435 return timeout >= 0 ? 0 : timeout;
1436}
1437
1438static const struct file_operations amdgpu_driver_kms_fops = {
1439 .owner = THIS_MODULE,
1440 .open = drm_open,
1441 .flush = amdgpu_flush,
1442 .release = drm_release,
1443 .unlocked_ioctl = amdgpu_drm_ioctl,
1444 .mmap = amdgpu_mmap,
1445 .poll = drm_poll,
1446 .read = drm_read,
1447#ifdef CONFIG_COMPAT
1448 .compat_ioctl = amdgpu_kms_compat_ioctl,
1449#endif
1450};
1451
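/*
 * amdgpu_file_to_fpriv - map a struct file back to its amdgpu_fpriv
 *
 * Only files opened through amdgpu_driver_kms_fops are accepted.
 * Hypothetical usage sketch (caller and helper are not from this file):
 *
 *	struct amdgpu_fpriv *fpriv;
 *
 *	if (!amdgpu_file_to_fpriv(filp, &fpriv))
 *		do_something_with(&fpriv->vm);
 */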
1452int amdgpu_file_to_fpriv(struct file *filp, struct amdgpu_fpriv **fpriv)
1453{
1454 struct drm_file *file;
1455
1456 if (!filp)
1457 return -EINVAL;
1458
1459 if (filp->f_op != &amdgpu_driver_kms_fops) {
1460 return -EINVAL;
1461 }
1462
1463 file = filp->private_data;
1464 *fpriv = file->driver_priv;
1465 return 0;
1466}
1467
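/*
 * DRM driver description: atomic modesetting, GEM, render nodes and
 * (timeline) syncobjs, plus the PRIME import/export hooks.  DRIVER_ATOMIC
 * is cleared again at probe time when the ASIC has no DC support or a
 * virtual display is in use.
 */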
1468static struct drm_driver kms_driver = {
1469 .driver_features =
1470 DRIVER_ATOMIC |
1471 DRIVER_GEM |
1472 DRIVER_RENDER | DRIVER_MODESET | DRIVER_SYNCOBJ |
1473 DRIVER_SYNCOBJ_TIMELINE,
1474 .open = amdgpu_driver_open_kms,
1475 .postclose = amdgpu_driver_postclose_kms,
1476 .lastclose = amdgpu_driver_lastclose_kms,
1477 .irq_handler = amdgpu_irq_handler,
1478 .ioctls = amdgpu_ioctls_kms,
1479 .gem_free_object_unlocked = amdgpu_gem_object_free,
1480 .gem_open_object = amdgpu_gem_object_open,
1481 .gem_close_object = amdgpu_gem_object_close,
1482 .dumb_create = amdgpu_mode_dumb_create,
1483 .dumb_map_offset = amdgpu_mode_dumb_mmap,
1484 .fops = &amdgpu_driver_kms_fops,
1485
1486 .prime_handle_to_fd = drm_gem_prime_handle_to_fd,
1487 .prime_fd_to_handle = drm_gem_prime_fd_to_handle,
1488 .gem_prime_export = amdgpu_gem_prime_export,
1489 .gem_prime_import = amdgpu_gem_prime_import,
1490 .gem_prime_vmap = amdgpu_gem_prime_vmap,
1491 .gem_prime_vunmap = amdgpu_gem_prime_vunmap,
1492 .gem_prime_mmap = amdgpu_gem_prime_mmap,
1493
1494 .name = DRIVER_NAME,
1495 .desc = DRIVER_DESC,
1496 .date = DRIVER_DATE,
1497 .major = KMS_DRIVER_MAJOR,
1498 .minor = KMS_DRIVER_MINOR,
1499 .patchlevel = KMS_DRIVER_PATCHLEVEL,
1500};
1501
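/*
 * PCI driver glue: probe/remove/shutdown callbacks plus the PM ops above,
 * registered from amdgpu_init() below.
 */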
1502static struct pci_driver amdgpu_kms_pci_driver = {
1503 .name = DRIVER_NAME,
1504 .id_table = pciidlist,
1505 .probe = amdgpu_pci_probe,
1506 .remove = amdgpu_pci_remove,
1507 .shutdown = amdgpu_pci_shutdown,
1508 .driver.pm = &amdgpu_pm_ops,
1509};
1510
1511
1512
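/*
 * amdgpu_init - module init
 *
 * Bails out if the VGA console has forced text mode, then sets up the sync
 * and fence slabs, registers the ATPX handler and (optionally) KFD, and
 * finally registers the PCI driver that does the per-device probing.
 */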
1513static int __init amdgpu_init(void)
1514{
1515 int r;
1516
1517 if (vgacon_text_force()) {
1518 DRM_ERROR("VGACON disables amdgpu kernel modesetting.\n");
1519 return -EINVAL;
1520 }
1521
1522 r = amdgpu_sync_init();
1523 if (r)
1524 goto error_sync;
1525
1526 r = amdgpu_fence_slab_init();
1527 if (r)
1528 goto error_fence;
1529
1530 DRM_INFO("amdgpu kernel modesetting enabled.\n");
1531 kms_driver.num_ioctls = amdgpu_max_kms_ioctl;
1532 amdgpu_register_atpx_handler();
1533
1534 /* Ignore KFD init failures. Normal when CONFIG_HSA_AMD is not set. */
1535 amdgpu_amdkfd_init();
1536
1537 /* let modprobe override vga console setting */
1538 return pci_register_driver(&amdgpu_kms_pci_driver);
1539
1540error_fence:
1541 amdgpu_sync_fini();
1542
1543error_sync:
1544 return r;
1545}
1546
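/*
 * amdgpu_exit - module exit
 *
 * Tears everything down in roughly the reverse order of amdgpu_init();
 * mmu_notifier_synchronize() is presumably here to let any outstanding
 * notifier callbacks finish before the module text is freed.
 */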
1547static void __exit amdgpu_exit(void)
1548{
1549 amdgpu_amdkfd_fini();
1550 pci_unregister_driver(&amdgpu_kms_pci_driver);
1551 amdgpu_unregister_atpx_handler();
1552 amdgpu_sync_fini();
1553 amdgpu_fence_slab_fini();
1554 mmu_notifier_synchronize();
1555}
1556
1557module_init(amdgpu_init);
1558module_exit(amdgpu_exit);
1559
1560MODULE_AUTHOR(DRIVER_AUTHOR);
1561MODULE_DESCRIPTION(DRIVER_DESC);
1562MODULE_LICENSE("GPL and additional rights");