1/*
2 * Copyright 2015 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: AMD
23 *
24 */
25
26#ifndef __AMDGPU_DM_H__
27#define __AMDGPU_DM_H__
28
29#include <drm/drmP.h>
30#include <drm/drm_atomic.h>
31#include "dc.h"
32
33/*
34 * This file contains the definition for amdgpu_display_manager
35 * and its API for amdgpu driver's use.
36 * This component provides all the display-related functionality
37 * and is the only component that calls the DAL API.
38 * The API contained here is intended for amdgpu driver use.
39 * The API that is called directly from the KMS framework is located
40 * in the amdgpu_dm_kms.h file.
41 */
42
43#define AMDGPU_DM_MAX_DISPLAY_INDEX 31
44/*
45#include "include/amdgpu_dal_power_if.h"
46#include "amdgpu_dm_irq.h"
47*/
48
49#include "irq_types.h"
50#include "signal_types.h"
51
52/* Forward declarations */
53struct amdgpu_device;
54struct drm_device;
55struct amdgpu_dm_irq_handler_data;
56
57struct amdgpu_dm_prev_state {
58 struct drm_framebuffer *fb;
59 int32_t x;
60 int32_t y;
61 struct drm_display_mode mode;
62};
63
64struct common_irq_params {
65 struct amdgpu_device *adev;
66 enum dc_irq_source irq_src;
67};
68
69struct irq_list_head {
70 struct list_head head;
71 /* In case this interrupt needs post-processing, 'work' will be queued */
72 struct work_struct work;
73};
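
/*
 * Deferral sketch (editorial illustration, not part of the original header;
 * 'adev' and 'irq_source' are assumed to come from the calling context):
 * when a low-priority IRQ source has handlers registered, its entry in
 * irq_handler_list_low_tab (declared in struct amdgpu_display_manager
 * below) can have its work item scheduled so the handlers run later in
 * process context:
 *
 *	struct irq_list_head *lh =
 *		&adev->dm.irq_handler_list_low_tab[irq_source];
 *
 *	if (!list_empty(&lh->head))
 *		schedule_work(&lh->work);
 */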
74
75#if defined(CONFIG_DRM_AMD_DC_FBC)
76struct dm_compressor_info {
77 void *cpu_addr;
78 struct amdgpu_bo *bo_ptr;
79 uint64_t gpu_addr;
80};
81#endif
82
83
84struct amdgpu_display_manager {
85 struct dal *dal;
86 struct dc *dc;
87 struct cgs_device *cgs_device;
88
89 struct amdgpu_device *adev; /* AMD base driver */
90 struct drm_device *ddev; /* DRM base driver */
91 u16 display_indexes_num;
92
93 struct amdgpu_dm_prev_state prev_state;
94
95 /*
96 * 'irq_source_handler_table' holds a list of handlers
97 * per (DAL) IRQ source.
98 *
99 * Each IRQ source may need to be handled at different contexts.
100 * By 'context' we mean, for example:
101 * - The ISR context, which is the direct interrupt handler.
102 * - The 'deferred' context - this is the post-processing of the
103 * interrupt, but at a lower priority.
104 *
105 * Note that handlers are called in the same order as they were
106 * registered (FIFO).
107 */
108 struct irq_list_head irq_handler_list_low_tab[DAL_IRQ_SOURCES_NUMBER];
109 struct list_head irq_handler_list_high_tab[DAL_IRQ_SOURCES_NUMBER];
110
111 struct common_irq_params
112 pflip_params[DC_IRQ_SOURCE_PFLIP_LAST - DC_IRQ_SOURCE_PFLIP_FIRST + 1];
113
114 struct common_irq_params
115 vblank_params[DC_IRQ_SOURCE_VBLANK6 - DC_IRQ_SOURCE_VBLANK1 + 1];
116
117 /* this spin lock synchronizes access to 'irq_handler_list_table' */
118 spinlock_t irq_handler_list_table_lock;
119
120 struct backlight_device *backlight_dev;
121
122 const struct dc_link *backlight_link;
123
124 struct work_struct mst_hotplug_work;
125
126 struct mod_freesync *freesync_module;
127
128 /**
129 * Caches device atomic state for suspend/resume
130 */
131 struct drm_atomic_state *cached_state;
132#if defined(CONFIG_DRM_AMD_DC_FBC)
133 struct dm_compressor_info compressor;
134#endif
135};
136
137struct amdgpu_dm_connector {
138
139 struct drm_connector base;
140 uint32_t connector_id;
141
142 /* we need to keep the EDID between detect
143 and get_modes due to analog/digital/TV encoder handling */
144 struct edid *edid;
145
146 /* shared with amdgpu */
147 struct amdgpu_hpd hpd;
148
149 /* number of modes generated from EDID at 'dc_sink' */
150 int num_modes;
151
152 /* The 'old' sink - before an HPD.
153 * The 'current' sink is in dc_link->sink. */
154 struct dc_sink *dc_sink;
155 struct dc_link *dc_link;
156 struct dc_sink *dc_em_sink;
157
158 /* DM only */
159 struct drm_dp_mst_topology_mgr mst_mgr;
160 struct amdgpu_dm_dp_aux dm_dp_aux;
161 struct drm_dp_mst_port *port;
162 struct amdgpu_dm_connector *mst_port;
163 struct amdgpu_encoder *mst_encoder;
164
165 /* TODO see if we can merge with ddc_bus or make a dm_connector */
166 struct amdgpu_i2c_adapter *i2c;
167
168 /* Monitor range limits */
169 int min_vfreq;
170 int max_vfreq;
171 int pixel_clock_mhz;
172
173 /* FreeSync caps */
174 struct mod_freesync_caps caps;
175
176 struct mutex hpd_lock;
177
178 bool fake_enable;
179
180 bool mst_connected;
181};
182
183#define to_amdgpu_dm_connector(x) container_of(x, struct amdgpu_dm_connector, base)
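
/*
 * Usage sketch (editorial illustration, not part of the original header):
 * DRM callbacks receive the embedded &struct drm_connector, and the macro
 * above recovers the wrapping amdgpu_dm_connector with container_of(). The
 * helper name below is hypothetical.
 *
 *	static void example_dump_connector(struct drm_connector *connector)
 *	{
 *		struct amdgpu_dm_connector *aconnector =
 *				to_amdgpu_dm_connector(connector);
 *
 *		DRM_DEBUG_KMS("connector %u has %d EDID modes\n",
 *			      aconnector->connector_id, aconnector->num_modes);
 *	}
 */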
184
185extern const struct amdgpu_ip_block_version dm_ip_block;
186
187struct amdgpu_framebuffer;
188struct amdgpu_display_manager;
189struct dc_validation_set;
190struct dc_plane_state;
191
192struct dm_plane_state {
193 struct drm_plane_state base;
194 struct dc_plane_state *dc_state;
195};
196
197struct dm_crtc_state {
198 struct drm_crtc_state base;
199 struct dc_stream_state *stream;
200
201 int crc_skip_count;
202 bool crc_enabled;
203};
204
205#define to_dm_crtc_state(x) container_of(x, struct dm_crtc_state, base)
206
207struct dm_atomic_state {
208 struct drm_atomic_state base;
209
210 struct dc_state *context;
211};
212
213#define to_dm_atomic_state(x) container_of(x, struct dm_atomic_state, base)
214
215struct dm_connector_state {
216 struct drm_connector_state base;
217
218 enum amdgpu_rmx_type scaling;
219 uint8_t underscan_vborder;
220 uint8_t underscan_hborder;
221 bool underscan_enable;
222 struct mod_freesync_user_enable user_enable;
223};
224
225#define to_dm_connector_state(x)\
226 container_of((x), struct dm_connector_state, base)
227
228void amdgpu_dm_connector_funcs_reset(struct drm_connector *connector);
229struct drm_connector_state *
230amdgpu_dm_connector_atomic_duplicate_state(struct drm_connector *connector);
231int amdgpu_dm_connector_atomic_set_property(struct drm_connector *connector,
232 struct drm_connector_state *state,
233 struct drm_property *property,
234 uint64_t val);
235
236int amdgpu_dm_connector_atomic_get_property(struct drm_connector *connector,
237 const struct drm_connector_state *state,
238 struct drm_property *property,
239 uint64_t *val);
240
241int amdgpu_dm_get_encoder_crtc_mask(struct amdgpu_device *adev);
242
243void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm,
244 struct amdgpu_dm_connector *aconnector,
245 int connector_type,
246 struct dc_link *link,
247 int link_index);
248
249int amdgpu_dm_connector_mode_valid(struct drm_connector *connector,
250 struct drm_display_mode *mode);
251
252void dm_restore_drm_connector_state(struct drm_device *dev,
253 struct drm_connector *connector);
254
255void amdgpu_dm_add_sink_to_freesync_module(struct drm_connector *connector,
256 struct edid *edid);
257
258void
259amdgpu_dm_remove_sink_from_freesync_module(struct drm_connector *connector);
260
261/* amdgpu_dm_crc.c */
262#ifdef CONFIG_DEBUG_FS
263int amdgpu_dm_crtc_set_crc_source(struct drm_crtc *crtc, const char *src_name,
264 size_t *values_cnt);
265void amdgpu_dm_crtc_handle_crc_irq(struct drm_crtc *crtc);
266#else
267#define amdgpu_dm_crtc_set_crc_source NULL
268#define amdgpu_dm_crtc_handle_crc_irq(x)
269#endif
270
271#define MAX_COLOR_LUT_ENTRIES 4096
272/* Legacy gamma LUT users such as X don't like large LUT sizes */
273#define MAX_COLOR_LEGACY_LUT_ENTRIES 256
274
275void amdgpu_dm_init_color_mod(void);
276int amdgpu_dm_set_degamma_lut(struct drm_crtc_state *crtc_state,
277 struct dc_plane_state *dc_plane_state);
278void amdgpu_dm_set_ctm(struct dm_crtc_state *crtc);
279int amdgpu_dm_set_regamma_lut(struct dm_crtc_state *crtc);
280
281extern const struct drm_encoder_helper_funcs amdgpu_dm_encoder_helper_funcs;
282
283#endif /* __AMDGPU_DM_H__ */
1/*
2 * Copyright (C) 2015-2020 Advanced Micro Devices, Inc. All rights reserved.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: AMD
23 *
24 */
25
26#ifndef __AMDGPU_DM_H__
27#define __AMDGPU_DM_H__
28
29#include <drm/display/drm_dp_mst_helper.h>
30#include <drm/drm_atomic.h>
31#include <drm/drm_connector.h>
32#include <drm/drm_crtc.h>
33#include <drm/drm_plane.h>
34
35/*
36 * This file contains the definition for amdgpu_display_manager
37 * and its API for amdgpu driver's use.
38 * This component provides all the display-related functionality
39 * and is the only component that calls the DAL API.
40 * The API contained here is intended for amdgpu driver use.
41 * The API that is called directly from the KMS framework is located
42 * in the amdgpu_dm_kms.h file.
43 */
44
45#define AMDGPU_DM_MAX_DISPLAY_INDEX 31
46
47#define AMDGPU_DM_MAX_CRTC 6
48
49#define AMDGPU_DM_MAX_NUM_EDP 2
50
51#define AMDGPU_DMUB_NOTIFICATION_MAX 5
52
53/*
54#include "include/amdgpu_dal_power_if.h"
55#include "amdgpu_dm_irq.h"
56*/
57
58#include "irq_types.h"
59#include "signal_types.h"
60#include "amdgpu_dm_crc.h"
61struct aux_payload;
62struct set_config_cmd_payload;
63enum aux_return_code_type;
64enum set_config_status;
65
66/* Forward declarations */
67struct amdgpu_device;
68struct amdgpu_crtc;
69struct drm_device;
70struct dc;
71struct amdgpu_bo;
72struct dmub_srv;
73struct dc_plane_state;
74struct dmub_notification;
75
76struct common_irq_params {
77 struct amdgpu_device *adev;
78 enum dc_irq_source irq_src;
79 atomic64_t previous_timestamp;
80};
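
/*
 * Indexing sketch (editorial illustration, not part of the original header;
 * 'adev' and 'irq_src' are assumed to come from the calling context): one
 * common_irq_params entry is kept per IRQ source in the *_params arrays of
 * struct amdgpu_display_manager below, so the slot for a given source is
 * typically selected by subtracting the first source of its group, e.g. for
 * vblank:
 *
 *	struct common_irq_params *params =
 *		&adev->dm.vblank_params[irq_src - DC_IRQ_SOURCE_VBLANK1];
 *
 *	params->adev = adev;
 *	params->irq_src = irq_src;
 */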
81
82/**
83 * struct dm_compressor_info - Buffer info used by frame buffer compression
84 * @cpu_addr: MMIO cpu addr
85 * @bo_ptr: Pointer to the buffer object
86 * @gpu_addr: MMIO gpu addr
87 */
88struct dm_compressor_info {
89 void *cpu_addr;
90 struct amdgpu_bo *bo_ptr;
91 uint64_t gpu_addr;
92};
93
94typedef void (*dmub_notify_interrupt_callback_t)(struct amdgpu_device *adev, struct dmub_notification *notify);
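
/*
 * Callback sketch (editorial illustration, not part of the original header;
 * 'notify_type' and the function name are hypothetical): a handler matching
 * dmub_notify_interrupt_callback_t can be stored in dmub_callback[] of
 * struct amdgpu_display_manager below, with dmub_thread_offload[] selecting
 * whether it is deferred to a work queue instead of running in the IRQ path.
 *
 *	static void example_dmub_notify_cb(struct amdgpu_device *adev,
 *					   struct dmub_notification *notify)
 *	{
 *		dev_dbg(adev->dev, "DMUB notification received\n");
 *	}
 *
 *	adev->dm.dmub_callback[notify_type] = example_dmub_notify_cb;
 *	adev->dm.dmub_thread_offload[notify_type] = true;
 */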
95
96/**
97 * struct dmub_hpd_work - Handle time consuming work in low priority outbox IRQ
98 *
99 * @handle_hpd_work: Work to be executed in a separate thread to handle hpd_low_irq
100 * @dmub_notify: notification for callback function
101 * @adev: amdgpu_device pointer
102 */
103struct dmub_hpd_work {
104 struct work_struct handle_hpd_work;
105 struct dmub_notification *dmub_notify;
106 struct amdgpu_device *adev;
107};
108
109/**
110 * struct vblank_control_work - Work data for vblank control
111 * @work: Kernel work data for the work event
112 * @dm: amdgpu display manager device
113 * @acrtc: amdgpu CRTC instance for which the event has occurred
114 * @stream: DC stream for which the event has occurred
115 * @enable: true if enabling vblank
116 */
117struct vblank_control_work {
118 struct work_struct work;
119 struct amdgpu_display_manager *dm;
120 struct amdgpu_crtc *acrtc;
121 struct dc_stream_state *stream;
122 bool enable;
123};
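
/*
 * Queueing sketch (editorial illustration, not part of the original header;
 * 'dm', 'acrtc', 'stream', 'enable' and the worker function name are assumed
 * from the calling context): a vblank_control_work item is filled in and
 * pushed to the vblank_control_workqueue declared further below.
 *
 *	struct vblank_control_work *work = kzalloc(sizeof(*work), GFP_ATOMIC);
 *
 *	if (!work)
 *		return;
 *
 *	INIT_WORK(&work->work, example_vblank_control_worker);
 *	work->dm = dm;
 *	work->acrtc = acrtc;
 *	work->stream = stream;
 *	work->enable = enable;
 *	queue_work(dm->vblank_control_workqueue, &work->work);
 */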
124
125/**
126 * struct amdgpu_dm_backlight_caps - Information about backlight
127 *
128 * Describe the backlight support for ACPI or eDP AUX.
129 */
130struct amdgpu_dm_backlight_caps {
131 /**
132 * @ext_caps: Keep the data struct with all the information about the
133 * display support for HDR.
134 */
135 union dpcd_sink_ext_caps *ext_caps;
136 /**
137 * @aux_min_input_signal: Min brightness value supported by the display
138 */
139 u32 aux_min_input_signal;
140 /**
141 * @aux_max_input_signal: Max brightness value supported by the display
142 * in nits.
143 */
144 u32 aux_max_input_signal;
145 /**
146 * @min_input_signal: minimum possible input in range 0-255.
147 */
148 int min_input_signal;
149 /**
150 * @max_input_signal: maximum possible input in range 0-255.
151 */
152 int max_input_signal;
153 /**
154 * @caps_valid: true if these values are from the ACPI interface.
155 */
156 bool caps_valid;
157 /**
158 * @aux_support: Describes if the display supports AUX backlight.
159 */
160 bool aux_support;
161};
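
/*
 * Scaling sketch (editorial illustration, not part of the original header;
 * the helper is hypothetical and ignores the AUX path): one plausible way to
 * apply the 0-255 caps is a linear rescale of a user brightness level into
 * the panel's supported input-signal window.
 *
 *	static u32 example_scale_brightness(const struct amdgpu_dm_backlight_caps *caps,
 *					    u32 user_level)
 *	{
 *		u32 min = caps->min_input_signal;
 *		u32 max = caps->max_input_signal;
 *
 *		return min + DIV_ROUND_CLOSEST(user_level * (max - min), 255);
 *	}
 */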
162
163/**
164 * struct dal_allocation - Tracks mapped FB memory for SMU communication
165 * @list: list of dal allocations
166 * @bo: GPU buffer object
167 * @cpu_ptr: CPU virtual address of the GPU buffer object
168 * @gpu_addr: GPU virtual address of the GPU buffer object
169 */
170struct dal_allocation {
171 struct list_head list;
172 struct amdgpu_bo *bo;
173 void *cpu_ptr;
174 u64 gpu_addr;
175};
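
/*
 * Allocation sketch (editorial illustration, not part of the original header;
 * 'adev' and 'r' are assumed from the calling context): an entry is created
 * with amdgpu_bo_create_kernel(), tracked on the display manager's da_list
 * declared further below, and can later be released with
 * amdgpu_bo_free_kernel().
 *
 *	struct dal_allocation *da = kzalloc(sizeof(*da), GFP_KERNEL);
 *
 *	if (!da)
 *		return -ENOMEM;
 *
 *	r = amdgpu_bo_create_kernel(adev, PAGE_SIZE, PAGE_SIZE,
 *				    AMDGPU_GEM_DOMAIN_VRAM,
 *				    &da->bo, &da->gpu_addr, &da->cpu_ptr);
 *	if (r) {
 *		kfree(da);
 *		return r;
 *	}
 *
 *	list_add(&da->list, &adev->dm.da_list);
 */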
176
177/**
178 * struct hpd_rx_irq_offload_work_queue - Work queue to handle hpd_rx_irq
179 * offload work
180 */
181struct hpd_rx_irq_offload_work_queue {
182 /**
183 * @wq: workqueue structure to queue offload work.
184 */
185 struct workqueue_struct *wq;
186 /**
187 * @offload_lock: To protect fields of offload work queue.
188 */
189 spinlock_t offload_lock;
190 /**
191 * @is_handling_link_loss: Used to prevent inserting a link-loss event
192 * while one is already being handled
193 */
194 bool is_handling_link_loss;
195 /**
196 * @aconnector: The aconnector that this work queue is attached to
197 */
198 struct amdgpu_dm_connector *aconnector;
199};
200
201/**
202 * struct hpd_rx_irq_offload_work - hpd_rx_irq offload work structure
203 */
204struct hpd_rx_irq_offload_work {
205 /**
206 * @work: offload work
207 */
208 struct work_struct work;
209 /**
210 * @data: reference irq data which is used while handling offload work
211 */
212 union hpd_irq_data data;
213 /**
214 * @offload_wq: offload work queue that this work is queued to
215 */
216 struct hpd_rx_irq_offload_work_queue *offload_wq;
217};
218
219/**
220 * struct amdgpu_display_manager - Central amdgpu display manager device
221 *
222 * @dc: Display Core control structure
223 * @adev: AMDGPU base driver structure
224 * @ddev: DRM base driver structure
225 * @display_indexes_num: Max number of display streams supported
226 * @irq_handler_list_table_lock: Synchronizes access to IRQ tables
227 * @backlight_dev: Backlight control device
228 * @backlight_link: Link on which to control backlight
229 * @backlight_caps: Capabilities of the backlight device
230 * @freesync_module: Module handling freesync calculations
231 * @hdcp_workqueue: AMDGPU content protection queue
232 * @fw_dmcu: Reference to DMCU firmware
233 * @dmcu_fw_version: Version of the DMCU firmware
234 * @soc_bounding_box: SOC bounding box values provided by gpu_info FW
235 * @cached_state: Caches device atomic state for suspend/resume
236 * @cached_dc_state: Cached state of content streams
237 * @compressor: Frame buffer compression buffer. See &struct dm_compressor_info
238 * @force_timing_sync: set via debugfs. When set, indicates that all connected
239 * displays will be forced to synchronize.
240 * @dmcub_trace_event_en: enable dmcub trace events
241 * @dmub_outbox_params: DMUB Outbox parameters
242 * @num_of_edps: number of backlight eDPs
243 * @disable_hpd_irq: disables all HPD and HPD RX interrupt handling in the
244 * driver when true
245 * @dmub_aux_transfer_done: struct completion used to indicate when DMUB
246 * transfers are done
247 * @delayed_hpd_wq: work queue used to delay DMUB HPD work
248 */
249struct amdgpu_display_manager {
250
251 struct dc *dc;
252
253 /**
254 * @dmub_srv:
255 *
256 * DMUB service, used for controlling the DMUB on hardware
257 * that supports it. The pointer to the dmub_srv will be
258 * NULL on hardware that does not support it.
259 */
260 struct dmub_srv *dmub_srv;
261
262 /**
263 * @dmub_notify:
264 *
265 * Notification from DMUB.
266 */
267
268 struct dmub_notification *dmub_notify;
269
270 /**
271 * @dmub_callback:
272 *
273 * Callback functions to handle notification from DMUB.
274 */
275
276 dmub_notify_interrupt_callback_t dmub_callback[AMDGPU_DMUB_NOTIFICATION_MAX];
277
278 /**
279 * @dmub_thread_offload:
280 *
281 * Flag to indicate if callback is offload.
282 */
283
284 bool dmub_thread_offload[AMDGPU_DMUB_NOTIFICATION_MAX];
285
286 /**
287 * @dmub_fb_info:
288 *
289 * Framebuffer regions for the DMUB.
290 */
291 struct dmub_srv_fb_info *dmub_fb_info;
292
293 /**
294 * @dmub_fw:
295 *
296 * DMUB firmware, required on hardware that has DMUB support.
297 */
298 const struct firmware *dmub_fw;
299
300 /**
301 * @dmub_bo:
302 *
303 * Buffer object for the DMUB.
304 */
305 struct amdgpu_bo *dmub_bo;
306
307 /**
308 * @dmub_bo_gpu_addr:
309 *
310 * GPU virtual address for the DMUB buffer object.
311 */
312 u64 dmub_bo_gpu_addr;
313
314 /**
315 * @dmub_bo_cpu_addr:
316 *
317 * CPU address for the DMUB buffer object.
318 */
319 void *dmub_bo_cpu_addr;
320
321 /**
322 * @dmcub_fw_version:
323 *
324 * DMCUB firmware version.
325 */
326 uint32_t dmcub_fw_version;
327
328 /**
329 * @cgs_device:
330 *
331 * The Common Graphics Services device. It provides an interface for
332 * accessing registers.
333 */
334 struct cgs_device *cgs_device;
335
336 struct amdgpu_device *adev;
337 struct drm_device *ddev;
338 u16 display_indexes_num;
339
340 /**
341 * @atomic_obj:
342 *
343 * In combination with &dm_atomic_state it helps manage
344 * global atomic state that doesn't map cleanly into existing
345 * drm resources, like &dc_context.
346 */
347 struct drm_private_obj atomic_obj;
348
349 /**
350 * @dc_lock:
351 *
352 * Guards access to DC functions that can issue register write
353 * sequences.
354 */
355 struct mutex dc_lock;
356
357 /**
358 * @audio_lock:
359 *
360 * Guards access to audio instance changes.
361 */
362 struct mutex audio_lock;
363
364 /**
365 * @audio_component:
366 *
367 * Used to notify ELD changes to sound driver.
368 */
369 struct drm_audio_component *audio_component;
370
371 /**
372 * @audio_registered:
373 *
374 * True if the audio component has been registered
375 * successfully, false otherwise.
376 */
377 bool audio_registered;
378
379 /**
380 * @irq_handler_list_low_tab:
381 *
382 * Low priority IRQ handler table.
383 *
384 * It is an n*m table consisting of n IRQ sources and m handlers per IRQ
385 * source. Low priority IRQ handlers are deferred to a workqueue to be
386 * processed. Hence, they can sleep.
387 *
388 * Note that handlers are called in the same order as they were
389 * registered (FIFO).
390 */
391 struct list_head irq_handler_list_low_tab[DAL_IRQ_SOURCES_NUMBER];
392
393 /**
394 * @irq_handler_list_high_tab:
395 *
396 * High priority IRQ handler table.
397 *
398 * It is an n*m table, same as &irq_handler_list_low_tab. However,
399 * handlers in this table are not deferred and are called immediately.
400 */
401 struct list_head irq_handler_list_high_tab[DAL_IRQ_SOURCES_NUMBER];
402
403 /**
404 * @pflip_params:
405 *
406 * Page flip IRQ parameters, passed to registered handlers when
407 * triggered.
408 */
409 struct common_irq_params
410 pflip_params[DC_IRQ_SOURCE_PFLIP_LAST - DC_IRQ_SOURCE_PFLIP_FIRST + 1];
411
412 /**
413 * @vblank_params:
414 *
415 * Vertical blanking IRQ parameters, passed to registered handlers when
416 * triggered.
417 */
418 struct common_irq_params
419 vblank_params[DC_IRQ_SOURCE_VBLANK6 - DC_IRQ_SOURCE_VBLANK1 + 1];
420
421 /**
422 * @vline0_params:
423 *
424 * OTG vertical interrupt0 IRQ parameters, passed to registered
425 * handlers when triggered.
426 */
427 struct common_irq_params
428 vline0_params[DC_IRQ_SOURCE_DC6_VLINE0 - DC_IRQ_SOURCE_DC1_VLINE0 + 1];
429
430 /**
431 * @vupdate_params:
432 *
433 * Vertical update IRQ parameters, passed to registered handlers when
434 * triggered.
435 */
436 struct common_irq_params
437 vupdate_params[DC_IRQ_SOURCE_VUPDATE6 - DC_IRQ_SOURCE_VUPDATE1 + 1];
438
439 /**
440 * @dmub_trace_params:
441 *
442 * DMUB trace event IRQ parameters, passed to registered handlers when
443 * triggered.
444 */
445 struct common_irq_params
446 dmub_trace_params[1];
447
448 struct common_irq_params
449 dmub_outbox_params[1];
450
451 spinlock_t irq_handler_list_table_lock;
452
453 struct backlight_device *backlight_dev[AMDGPU_DM_MAX_NUM_EDP];
454
455 const struct dc_link *backlight_link[AMDGPU_DM_MAX_NUM_EDP];
456
457 uint8_t num_of_edps;
458
459 struct amdgpu_dm_backlight_caps backlight_caps[AMDGPU_DM_MAX_NUM_EDP];
460
461 struct mod_freesync *freesync_module;
462#ifdef CONFIG_DRM_AMD_DC_HDCP
463 struct hdcp_workqueue *hdcp_workqueue;
464#endif
465
466 /**
467 * @vblank_control_workqueue:
468 *
469 * Deferred work for vblank control events.
470 */
471 struct workqueue_struct *vblank_control_workqueue;
472
473 struct drm_atomic_state *cached_state;
474 struct dc_state *cached_dc_state;
475
476 struct dm_compressor_info compressor;
477
478 const struct firmware *fw_dmcu;
479 uint32_t dmcu_fw_version;
480 /**
481 * @soc_bounding_box:
482 *
483 * gpu_info FW provided soc bounding box struct or 0 if not
484 * available in FW
485 */
486 const struct gpu_info_soc_bounding_box_v1_0 *soc_bounding_box;
487
488 /**
489 * @active_vblank_irq_count:
490 *
491 * number of currently active vblank irqs
492 */
493 uint32_t active_vblank_irq_count;
494
495#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
496 /**
497 * @crc_rd_wrk:
498 *
499 * Work to be executed in a separate thread to communicate with PSP.
500 */
501 struct crc_rd_work *crc_rd_wrk;
502#endif
503 /**
504 * @hpd_rx_offload_wq:
505 *
506 * Work queue to offload hpd_rx_irq work items
507 */
508 struct hpd_rx_irq_offload_work_queue *hpd_rx_offload_wq;
509 /**
510 * @mst_encoders:
511 *
512 * fake encoders used for DP MST.
513 */
514 struct amdgpu_encoder mst_encoders[AMDGPU_DM_MAX_CRTC];
515 bool force_timing_sync;
516 bool disable_hpd_irq;
517 bool dmcub_trace_event_en;
518 /**
519 * @da_list:
520 *
521 * DAL fb memory allocation list, for communication with SMU.
522 */
523 struct list_head da_list;
524 struct completion dmub_aux_transfer_done;
525 struct workqueue_struct *delayed_hpd_wq;
526
527 /**
528 * @brightness:
529 *
530 * cached backlight values.
531 */
532 u32 brightness[AMDGPU_DM_MAX_NUM_EDP];
533 /**
534 * @actual_brightness:
535 *
536 * last successfully applied backlight values.
537 */
538 u32 actual_brightness[AMDGPU_DM_MAX_NUM_EDP];
539
540 /**
541 * @aux_hpd_discon_quirk:
542 *
543 * Quirk for an HPD disconnect while an AUX transaction is ongoing;
544 * observed on certain Intel platforms.
545 */
546 bool aux_hpd_discon_quirk;
547
548 /**
549 * @dpia_aux_lock:
550 *
551 * Guards access to DPIA AUX
552 */
553 struct mutex dpia_aux_lock;
554};
555
556enum dsc_clock_force_state {
557 DSC_CLK_FORCE_DEFAULT = 0,
558 DSC_CLK_FORCE_ENABLE,
559 DSC_CLK_FORCE_DISABLE,
560};
561
562struct dsc_preferred_settings {
563 enum dsc_clock_force_state dsc_force_enable;
564 uint32_t dsc_num_slices_v;
565 uint32_t dsc_num_slices_h;
566 uint32_t dsc_bits_per_pixel;
567 bool dsc_force_disable_passthrough;
568};
569
570enum mst_progress_status {
571 MST_STATUS_DEFAULT = 0,
572 MST_PROBE = BIT(0),
573 MST_REMOTE_EDID = BIT(1),
574 MST_ALLOCATE_NEW_PAYLOAD = BIT(2),
575 MST_CLEAR_ALLOCATED_PAYLOAD = BIT(3),
576};
577
578struct amdgpu_dm_connector {
579
580 struct drm_connector base;
581 uint32_t connector_id;
582
583 /* we need to keep the EDID between detect
584 and get_modes due to analog/digital/TV encoder handling */
585 struct edid *edid;
586
587 /* shared with amdgpu */
588 struct amdgpu_hpd hpd;
589
590 /* number of modes generated from EDID at 'dc_sink' */
591 int num_modes;
592
593 /* The 'old' sink - before an HPD.
594 * The 'current' sink is in dc_link->sink. */
595 struct dc_sink *dc_sink;
596 struct dc_link *dc_link;
597
598 /**
599 * @dc_em_sink: Reference to the emulated (virtual) sink.
600 */
601 struct dc_sink *dc_em_sink;
602
603 /* DM only */
604 struct drm_dp_mst_topology_mgr mst_mgr;
605 struct amdgpu_dm_dp_aux dm_dp_aux;
606 struct drm_dp_mst_port *port;
607 struct amdgpu_dm_connector *mst_port;
608 struct drm_dp_aux *dsc_aux;
609 /* TODO see if we can merge with ddc_bus or make a dm_connector */
610 struct amdgpu_i2c_adapter *i2c;
611
612 /* Monitor range limits */
613 /**
614 * @min_vfreq: Minimal frequency supported by the display in Hz. This
615 * value is set to zero when there is no FreeSync support.
616 */
617 int min_vfreq;
618
619 /**
620 * @max_vfreq: Maximum frequency supported by the display in Hz. This
621 * value is set to zero when there is no FreeSync support.
622 */
623 int max_vfreq;
624 int pixel_clock_mhz;
625
626 /* Audio instance - protected by audio_lock. */
627 int audio_inst;
628
629 struct mutex hpd_lock;
630
631 bool fake_enable;
632#ifdef CONFIG_DEBUG_FS
633 uint32_t debugfs_dpcd_address;
634 uint32_t debugfs_dpcd_size;
635#endif
636 bool force_yuv420_output;
637 struct dsc_preferred_settings dsc_settings;
638 union dp_downstream_port_present mst_downstream_port_present;
639 /* Cached display modes */
640 struct drm_display_mode freesync_vid_base;
641
642 int psr_skip_count;
643
644 /* Record progress status of MST */
645 uint8_t mst_status;
646};
647
648static inline void amdgpu_dm_set_mst_status(uint8_t *status,
649 uint8_t flags, bool set)
650{
651 if (set)
652 *status |= flags;
653 else
654 *status &= ~flags;
655}
656
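/*
 * Usage sketch (editorial illustration, not part of the original header;
 * 'aconnector' is assumed from the calling context): the helper above sets
 * or clears individual mst_progress_status bits in
 * amdgpu_dm_connector.mst_status, e.g. while probing an MST topology:
 *
 *	amdgpu_dm_set_mst_status(&aconnector->mst_status, MST_PROBE, true);
 *
 *	amdgpu_dm_set_mst_status(&aconnector->mst_status,
 *				 MST_REMOTE_EDID | MST_ALLOCATE_NEW_PAYLOAD,
 *				 false);
 */
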
657#define to_amdgpu_dm_connector(x) container_of(x, struct amdgpu_dm_connector, base)
658
659extern const struct amdgpu_ip_block_version dm_ip_block;
660
661struct dm_plane_state {
662 struct drm_plane_state base;
663 struct dc_plane_state *dc_state;
664};
665
666struct dm_crtc_state {
667 struct drm_crtc_state base;
668 struct dc_stream_state *stream;
669
670 bool cm_has_degamma;
671 bool cm_is_degamma_srgb;
672
673 bool mpo_requested;
674
675 int update_type;
676 int active_planes;
677
678 int crc_skip_count;
679
680 bool freesync_vrr_info_changed;
681
682 bool dsc_force_changed;
683 bool vrr_supported;
684 struct mod_freesync_config freesync_config;
685 struct dc_info_packet vrr_infopacket;
686
687 int abm_level;
688};
689
690#define to_dm_crtc_state(x) container_of(x, struct dm_crtc_state, base)
691
692struct dm_atomic_state {
693 struct drm_private_state base;
694
695 struct dc_state *context;
696};
697
698#define to_dm_atomic_state(x) container_of(x, struct dm_atomic_state, base)
699
700struct dm_connector_state {
701 struct drm_connector_state base;
702
703 enum amdgpu_rmx_type scaling;
704 uint8_t underscan_vborder;
705 uint8_t underscan_hborder;
706 bool underscan_enable;
707 bool freesync_capable;
708#ifdef CONFIG_DRM_AMD_DC_HDCP
709 bool update_hdcp;
710#endif
711 uint8_t abm_level;
712 int vcpi_slots;
713 uint64_t pbn;
714};
715
716/**
717 * struct amdgpu_hdmi_vsdb_info - Keep track of the VSDB info
718 *
719 * AMDGPU supports FreeSync over HDMI by using the VSDB section, and this
720 * struct is useful to keep track of the display-specific information about
721 * FreeSync.
722 */
723struct amdgpu_hdmi_vsdb_info {
724 /**
725 * @amd_vsdb_version: Vendor Specific Data Block Version, should be
726 * used to determine which Vendor Specific InfoFrame (VSIF) to send.
727 */
728 unsigned int amd_vsdb_version;
729
730 /**
731 * @freesync_supported: FreeSync Supported.
732 */
733 bool freesync_supported;
734
735 /**
736 * @min_refresh_rate_hz: FreeSync Minimum Refresh Rate in Hz.
737 */
738 unsigned int min_refresh_rate_hz;
739
740 /**
741 * @max_refresh_rate_hz: FreeSync Maximum Refresh Rate in Hz
742 */
743 unsigned int max_refresh_rate_hz;
744};
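
/*
 * Consumption sketch (editorial illustration, not part of the original header;
 * 'vsdb_info' and 'aconnector' are assumed from the calling context): when an
 * AMD VSDB advertising FreeSync is parsed from the HDMI EDID, its refresh-rate
 * window can be copied into the connector's monitor range limits:
 *
 *	if (vsdb_info.freesync_supported) {
 *		aconnector->min_vfreq = vsdb_info.min_refresh_rate_hz;
 *		aconnector->max_vfreq = vsdb_info.max_refresh_rate_hz;
 *	}
 */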
745
746
747#define to_dm_connector_state(x)\
748 container_of((x), struct dm_connector_state, base)
749
750void amdgpu_dm_connector_funcs_reset(struct drm_connector *connector);
751struct drm_connector_state *
752amdgpu_dm_connector_atomic_duplicate_state(struct drm_connector *connector);
753int amdgpu_dm_connector_atomic_set_property(struct drm_connector *connector,
754 struct drm_connector_state *state,
755 struct drm_property *property,
756 uint64_t val);
757
758int amdgpu_dm_connector_atomic_get_property(struct drm_connector *connector,
759 const struct drm_connector_state *state,
760 struct drm_property *property,
761 uint64_t *val);
762
763int amdgpu_dm_get_encoder_crtc_mask(struct amdgpu_device *adev);
764
765void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm,
766 struct amdgpu_dm_connector *aconnector,
767 int connector_type,
768 struct dc_link *link,
769 int link_index);
770
771enum drm_mode_status amdgpu_dm_connector_mode_valid(struct drm_connector *connector,
772 struct drm_display_mode *mode);
773
774void dm_restore_drm_connector_state(struct drm_device *dev,
775 struct drm_connector *connector);
776
777void amdgpu_dm_update_freesync_caps(struct drm_connector *connector,
778 struct edid *edid);
779
780void amdgpu_dm_trigger_timing_sync(struct drm_device *dev);
781
782#define MAX_COLOR_LUT_ENTRIES 4096
783/* Legacy gamma LUT users such as X don't like large LUT sizes */
784#define MAX_COLOR_LEGACY_LUT_ENTRIES 256
785
786void amdgpu_dm_init_color_mod(void);
787int amdgpu_dm_verify_lut_sizes(const struct drm_crtc_state *crtc_state);
788int amdgpu_dm_update_crtc_color_mgmt(struct dm_crtc_state *crtc);
789int amdgpu_dm_update_plane_color_mgmt(struct dm_crtc_state *crtc,
790 struct dc_plane_state *dc_plane_state);
791
792void amdgpu_dm_update_connector_after_detect(
793 struct amdgpu_dm_connector *aconnector);
794
795extern const struct drm_encoder_helper_funcs amdgpu_dm_encoder_helper_funcs;
796
797int amdgpu_dm_process_dmub_aux_transfer_sync(struct dc_context *ctx, unsigned int link_index,
798 struct aux_payload *payload, enum aux_return_code_type *operation_result);
799
800int amdgpu_dm_process_dmub_set_config_sync(struct dc_context *ctx, unsigned int link_index,
801 struct set_config_cmd_payload *payload, enum set_config_status *operation_result);
802
803bool check_seamless_boot_capability(struct amdgpu_device *adev);
804
805struct dc_stream_state *
806 create_validate_stream_for_sink(struct amdgpu_dm_connector *aconnector,
807 const struct drm_display_mode *drm_mode,
808 const struct dm_connector_state *dm_state,
809 const struct dc_stream_state *old_stream);
810
811int dm_atomic_get_state(struct drm_atomic_state *state,
812 struct dm_atomic_state **dm_state);
813
814struct amdgpu_dm_connector *
815amdgpu_dm_find_first_crtc_matching_connector(struct drm_atomic_state *state,
816 struct drm_crtc *crtc);
817
818int convert_dc_color_depth_into_bpc(enum dc_color_depth display_color_depth);
819#endif /* __AMDGPU_DM_H__ */