1/*
2 * Copyright 2015 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: AMD
23 *
24 */
25
26#include <linux/string.h>
27#include <linux/acpi.h>
28#include <linux/version.h>
29#include <linux/i2c.h>
30
31#include <drm/drm_probe_helper.h>
32#include <drm/amdgpu_drm.h>
33#include <drm/drm_edid.h>
34
35#include "dm_services.h"
36#include "amdgpu.h"
37#include "dc.h"
38#include "amdgpu_dm.h"
39#include "amdgpu_dm_irq.h"
40#include "amdgpu_dm_mst_types.h"
41
42#include "dm_helpers.h"
43
44/* dm_helpers_parse_edid_caps
45 *
46 * Parse edid caps
47 *
48 * @edid: [in] pointer to edid
49 * edid_caps: [in] pointer to edid caps
50 * @return
51 * void
52 * */
53enum dc_edid_status dm_helpers_parse_edid_caps(
54 struct dc_context *ctx,
55 const struct dc_edid *edid,
56 struct dc_edid_caps *edid_caps)
57{
58 struct edid *edid_buf = (struct edid *) edid->raw_edid;
59 struct cea_sad *sads;
60 int sad_count = -1;
61 int sadb_count = -1;
62 int i = 0;
63 int j = 0;
64 uint8_t *sadb = NULL;
65
66 enum dc_edid_status result = EDID_OK;
67
68 if (!edid_caps || !edid)
69 return EDID_BAD_INPUT;
70
71 if (!drm_edid_is_valid(edid_buf))
72 result = EDID_BAD_CHECKSUM;
73
74 edid_caps->manufacturer_id = (uint16_t) edid_buf->mfg_id[0] |
75 ((uint16_t) edid_buf->mfg_id[1])<<8;
76 edid_caps->product_id = (uint16_t) edid_buf->prod_code[0] |
77 ((uint16_t) edid_buf->prod_code[1])<<8;
78 edid_caps->serial_number = edid_buf->serial;
79 edid_caps->manufacture_week = edid_buf->mfg_week;
80 edid_caps->manufacture_year = edid_buf->mfg_year;
81
82 /* One of the four detailed_timings stores the monitor name. It's
83 * stored in an array of length 13. */
84 for (i = 0; i < 4; i++) {
85 if (edid_buf->detailed_timings[i].data.other_data.type == 0xfc) {
86 while (j < 13 && edid_buf->detailed_timings[i].data.other_data.data.str.str[j]) {
87 if (edid_buf->detailed_timings[i].data.other_data.data.str.str[j] == '\n')
88 break;
89
90 edid_caps->display_name[j] =
91 edid_buf->detailed_timings[i].data.other_data.data.str.str[j];
92 j++;
93 }
94 }
95 }
96
97 edid_caps->edid_hdmi = drm_detect_hdmi_monitor(
98 (struct edid *) edid->raw_edid);
99
100 sad_count = drm_edid_to_sad((struct edid *) edid->raw_edid, &sads);
101 if (sad_count <= 0)
102 return result;
103
104 edid_caps->audio_mode_count = sad_count < DC_MAX_AUDIO_DESC_COUNT ? sad_count : DC_MAX_AUDIO_DESC_COUNT;
105 for (i = 0; i < edid_caps->audio_mode_count; ++i) {
106 struct cea_sad *sad = &sads[i];
107
108 edid_caps->audio_modes[i].format_code = sad->format;
109 edid_caps->audio_modes[i].channel_count = sad->channels + 1;
110 edid_caps->audio_modes[i].sample_rate = sad->freq;
111 edid_caps->audio_modes[i].sample_size = sad->byte2;
112 }
113
114 sadb_count = drm_edid_to_speaker_allocation((struct edid *) edid->raw_edid, &sadb);
115
116 if (sadb_count < 0) {
117 DRM_ERROR("Couldn't read Speaker Allocation Data Block: %d\n", sadb_count);
118 sadb_count = 0;
119 }
120
121 if (sadb_count)
122 edid_caps->speaker_flags = sadb[0];
123 else
124 edid_caps->speaker_flags = DEFAULT_SPEAKER_LOCATION;
125
126 kfree(sads);
127 kfree(sadb);
128
129 return result;
130}
131
/*
 * Copy the active DRM MST payload table into DC's stream allocation table.
 *
 * DC must program the ASIC with the same slot 1-63 ordering DRM negotiated
 * with the branch device, so this walks mst_mgr->payloads in order under
 * payload_lock and mirrors LOCAL/REMOTE entries into proposed_table.
 */
static void get_payload_table(
		struct amdgpu_dm_connector *aconnector,
		struct dp_mst_stream_allocation_table *proposed_table)
{
	int i;
	struct drm_dp_mst_topology_mgr *mst_mgr =
			&aconnector->mst_port->mst_mgr;

	mutex_lock(&mst_mgr->payload_lock);

	proposed_table->stream_count = 0;

	/* number of active streams */
	for (i = 0; i < mst_mgr->max_payloads; i++) {
		if (mst_mgr->payloads[i].num_slots == 0)
			break; /* end of vcp_id table */

		/* DELETE_LOCAL entries should have been flushed already. */
		ASSERT(mst_mgr->payloads[i].payload_state !=
				DP_PAYLOAD_DELETE_LOCAL);

		if (mst_mgr->payloads[i].payload_state == DP_PAYLOAD_LOCAL ||
			mst_mgr->payloads[i].payload_state ==
					DP_PAYLOAD_REMOTE) {

			struct dp_mst_stream_allocation *sa =
					&proposed_table->stream_allocations[
						proposed_table->stream_count];

			sa->slot_count = mst_mgr->payloads[i].num_slots;
			sa->vcp_id = mst_mgr->proposed_vcpis[i]->vcpi;
			proposed_table->stream_count++;
		}
	}

	mutex_unlock(&mst_mgr->payload_lock);
}
168
/* Stub: no DP branch-device info needs updating on amdgpu. */
void dm_helpers_dp_update_branch_info(
	struct dc_context *ctx,
	const struct dc_link *link)
{}
173
174/*
175 * Writes payload allocation table in immediate downstream device.
176 */
/*
 * Writes payload allocation table in immediate downstream device.
 *
 * On enable: allocates the DRM VCPI for this stream's port using the
 * pbn/vcpi_slots computed during atomic check, runs part 1 of the payload
 * programming, and mirrors the resulting DRM payload table into
 * proposed_table for DC. On disable: resets the VCPI slots instead.
 *
 * Returns false when the stream has no MST connector/port, MST is not
 * active, or the VCPI allocation fails.
 */
bool dm_helpers_dp_mst_write_payload_allocation_table(
		struct dc_context *ctx,
		const struct dc_stream_state *stream,
		struct dp_mst_stream_allocation_table *proposed_table,
		bool enable)
{
	struct amdgpu_dm_connector *aconnector;
	struct dm_connector_state *dm_conn_state;
	struct drm_dp_mst_topology_mgr *mst_mgr;
	struct drm_dp_mst_port *mst_port;
	bool ret;

	aconnector = (struct amdgpu_dm_connector *)stream->dm_stream_context;
	/* Accessing the connector state is required for vcpi_slots allocation
	 * and directly relies on behaviour in commit check
	 * that blocks before commit guaranteeing that the state
	 * is not gonna be swapped while still in use in commit tail */

	if (!aconnector || !aconnector->mst_port)
		return false;

	dm_conn_state = to_dm_connector_state(aconnector->base.state);

	mst_mgr = &aconnector->mst_port->mst_mgr;

	if (!mst_mgr->mst_state)
		return false;

	mst_port = aconnector->port;

	if (enable) {
		/* pbn and vcpi_slots were precomputed in atomic check. */
		ret = drm_dp_mst_allocate_vcpi(mst_mgr, mst_port,
					       dm_conn_state->pbn,
					       dm_conn_state->vcpi_slots);
		if (!ret)
			return false;

	} else {
		drm_dp_mst_reset_vcpi_slots(mst_mgr, mst_port);
	}

	/* It's OK for this to fail */
	drm_dp_update_payload_part1(mst_mgr);

	/* mst_mgr->payloads are VC payload notify MST branch using DPCD or
	 * AUX message. The sequence is slot 1-63 allocated sequence for each
	 * stream. AMD ASIC stream slot allocation should follow the same
	 * sequence. copy DRM MST allocation to dc */

	get_payload_table(aconnector, proposed_table);

	return true;
}
231
232/*
233 * poll pending down reply
234 */
/*
 * poll pending down reply
 *
 * Stub: down-reply polling is handled by the DRM MST core on amdgpu.
 */
void dm_helpers_dp_mst_poll_pending_down_reply(
	struct dc_context *ctx,
	const struct dc_link *link)
{}
239
240/*
241 * Clear payload allocation table before enable MST DP link.
242 */
/*
 * Clear payload allocation table before enable MST DP link.
 *
 * Stub: the DRM MST core manages payload table lifetime on amdgpu.
 */
void dm_helpers_dp_mst_clear_payload_allocation_table(
	struct dc_context *ctx,
	const struct dc_link *link)
{}
247
248/*
249 * Polls for ACT (allocation change trigger) handled and sends
250 * ALLOCATE_PAYLOAD message.
251 */
252enum act_return_status dm_helpers_dp_mst_poll_for_allocation_change_trigger(
253 struct dc_context *ctx,
254 const struct dc_stream_state *stream)
255{
256 struct amdgpu_dm_connector *aconnector;
257 struct drm_dp_mst_topology_mgr *mst_mgr;
258 int ret;
259
260 aconnector = (struct amdgpu_dm_connector *)stream->dm_stream_context;
261
262 if (!aconnector || !aconnector->mst_port)
263 return ACT_FAILED;
264
265 mst_mgr = &aconnector->mst_port->mst_mgr;
266
267 if (!mst_mgr->mst_state)
268 return ACT_FAILED;
269
270 ret = drm_dp_check_act_status(mst_mgr);
271
272 if (ret)
273 return ACT_FAILED;
274
275 return ACT_SUCCESS;
276}
277
278bool dm_helpers_dp_mst_send_payload_allocation(
279 struct dc_context *ctx,
280 const struct dc_stream_state *stream,
281 bool enable)
282{
283 struct amdgpu_dm_connector *aconnector;
284 struct drm_dp_mst_topology_mgr *mst_mgr;
285 struct drm_dp_mst_port *mst_port;
286
287 aconnector = (struct amdgpu_dm_connector *)stream->dm_stream_context;
288
289 if (!aconnector || !aconnector->mst_port)
290 return false;
291
292 mst_port = aconnector->port;
293
294 mst_mgr = &aconnector->mst_port->mst_mgr;
295
296 if (!mst_mgr->mst_state)
297 return false;
298
299 /* It's OK for this to fail */
300 drm_dp_update_payload_part2(mst_mgr);
301
302 if (!enable)
303 drm_dp_mst_deallocate_vcpi(mst_mgr, mst_port);
304
305 return true;
306}
307
/* Emit the DTN log opening marker, to the log buffer or to dmesg. */
void dm_dtn_log_begin(struct dc_context *ctx,
		      struct dc_log_buffer_ctx *log_ctx)
{
	static const char msg[] = "[dtn begin]\n";

	if (log_ctx)
		dm_dtn_log_append_v(ctx, log_ctx, "%s", msg);
	else
		pr_info("%s", msg);
}
320
/*
 * Append a printf-formatted message to a DTN log buffer, growing the
 * buffer as needed.
 *
 * @ctx: DC context (unused)
 * @log_ctx: buffer state (buf/pos/size); when NULL the message goes to
 *           dmesg via %pV instead
 * @msg: printf-style format string followed by its arguments
 */
void dm_dtn_log_append_v(struct dc_context *ctx,
		struct dc_log_buffer_ctx *log_ctx,
		const char *msg, ...)
{
	va_list args;
	size_t total;
	int n;

	if (!log_ctx) {
		/* No context, redirect to dmesg. */
		struct va_format vaf;

		vaf.fmt = msg;
		vaf.va = &args;

		va_start(args, msg);
		pr_info("%pV", &vaf);
		va_end(args);

		return;
	}

	/* Measure the output. Each vsnprintf pass gets its own
	 * va_start/va_end since a va_list may not be reused. */
	va_start(args, msg);
	n = vsnprintf(NULL, 0, msg, args);
	va_end(args);

	if (n <= 0)
		return;

	/* Reallocate the string buffer as needed (+1 for the NUL). */
	total = log_ctx->pos + n + 1;

	if (total > log_ctx->size) {
		char *buf = (char *)kvcalloc(total, sizeof(char), GFP_KERNEL);

		if (buf) {
			memcpy(buf, log_ctx->buf, log_ctx->pos);
			kfree(log_ctx->buf);

			log_ctx->buf = buf;
			log_ctx->size = total;
		}
		/* On allocation failure, fall through: vscnprintf below
		 * truncates to whatever space the old buffer has. */
	}

	if (!log_ctx->buf)
		return;

	/* Write the formatted string to the log buffer. */
	va_start(args, msg);
	n = vscnprintf(
		log_ctx->buf + log_ctx->pos,
		log_ctx->size - log_ctx->pos,
		msg,
		args);
	va_end(args);

	if (n > 0)
		log_ctx->pos += n;
}
381
/* Emit the DTN log closing marker, to the log buffer or to dmesg. */
void dm_dtn_log_end(struct dc_context *ctx,
		    struct dc_log_buffer_ctx *log_ctx)
{
	static const char msg[] = "[dtn end]\n";

	if (log_ctx)
		dm_dtn_log_append_v(ctx, log_ctx, "%s", msg);
	else
		pr_info("%s", msg);
}
394
395bool dm_helpers_dp_mst_start_top_mgr(
396 struct dc_context *ctx,
397 const struct dc_link *link,
398 bool boot)
399{
400 struct amdgpu_dm_connector *aconnector = link->priv;
401
402 if (!aconnector) {
403 DRM_ERROR("Failed to find connector for link!");
404 return false;
405 }
406
407 if (boot) {
408 DRM_INFO("DM_MST: Differing MST start on aconnector: %p [id: %d]\n",
409 aconnector, aconnector->base.base.id);
410 return true;
411 }
412
413 DRM_INFO("DM_MST: starting TM on aconnector: %p [id: %d]\n",
414 aconnector, aconnector->base.base.id);
415
416 return (drm_dp_mst_topology_mgr_set_mst(&aconnector->mst_mgr, true) == 0);
417}
418
419void dm_helpers_dp_mst_stop_top_mgr(
420 struct dc_context *ctx,
421 const struct dc_link *link)
422{
423 struct amdgpu_dm_connector *aconnector = link->priv;
424
425 if (!aconnector) {
426 DRM_ERROR("Failed to find connector for link!");
427 return;
428 }
429
430 DRM_INFO("DM_MST: stopping TM on aconnector: %p [id: %d]\n",
431 aconnector, aconnector->base.base.id);
432
433 if (aconnector->mst_mgr.mst_state == true)
434 drm_dp_mst_topology_mgr_set_mst(&aconnector->mst_mgr, false);
435}
436
437bool dm_helpers_dp_read_dpcd(
438 struct dc_context *ctx,
439 const struct dc_link *link,
440 uint32_t address,
441 uint8_t *data,
442 uint32_t size)
443{
444
445 struct amdgpu_dm_connector *aconnector = link->priv;
446
447 if (!aconnector) {
448 DC_LOG_DC("Failed to find connector for link!\n");
449 return false;
450 }
451
452 return drm_dp_dpcd_read(&aconnector->dm_dp_aux.aux, address,
453 data, size) > 0;
454}
455
456bool dm_helpers_dp_write_dpcd(
457 struct dc_context *ctx,
458 const struct dc_link *link,
459 uint32_t address,
460 const uint8_t *data,
461 uint32_t size)
462{
463 struct amdgpu_dm_connector *aconnector = link->priv;
464
465 if (!aconnector) {
466 DRM_ERROR("Failed to find connector for link!");
467 return false;
468 }
469
470 return drm_dp_dpcd_write(&aconnector->dm_dp_aux.aux,
471 address, (uint8_t *)data, size) > 0;
472}
473
474bool dm_helpers_submit_i2c(
475 struct dc_context *ctx,
476 const struct dc_link *link,
477 struct i2c_command *cmd)
478{
479 struct amdgpu_dm_connector *aconnector = link->priv;
480 struct i2c_msg *msgs;
481 int i = 0;
482 int num = cmd->number_of_payloads;
483 bool result;
484
485 if (!aconnector) {
486 DRM_ERROR("Failed to find connector for link!");
487 return false;
488 }
489
490 msgs = kcalloc(num, sizeof(struct i2c_msg), GFP_KERNEL);
491
492 if (!msgs)
493 return false;
494
495 for (i = 0; i < num; i++) {
496 msgs[i].flags = cmd->payloads[i].write ? 0 : I2C_M_RD;
497 msgs[i].addr = cmd->payloads[i].address;
498 msgs[i].len = cmd->payloads[i].length;
499 msgs[i].buf = cmd->payloads[i].data;
500 }
501
502 result = i2c_transfer(&aconnector->i2c->base, msgs, num) == num;
503
504 kfree(msgs);
505
506 return result;
507}
508bool dm_helpers_dp_write_dsc_enable(
509 struct dc_context *ctx,
510 const struct dc_stream_state *stream,
511 bool enable
512)
513{
514 uint8_t enable_dsc = enable ? 1 : 0;
515 struct amdgpu_dm_connector *aconnector;
516
517 if (!stream)
518 return false;
519
520 if (stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
521 aconnector = (struct amdgpu_dm_connector *)stream->dm_stream_context;
522
523 if (!aconnector->dsc_aux)
524 return false;
525
526 return (drm_dp_dpcd_write(aconnector->dsc_aux, DP_DSC_ENABLE, &enable_dsc, 1) >= 0);
527 }
528
529 if (stream->signal == SIGNAL_TYPE_DISPLAY_PORT)
530 return dm_helpers_dp_write_dpcd(ctx, stream->link, DP_DSC_ENABLE, &enable_dsc, 1);
531
532 return false;
533}
534
535bool dm_helpers_is_dp_sink_present(struct dc_link *link)
536{
537 bool dp_sink_present;
538 struct amdgpu_dm_connector *aconnector = link->priv;
539
540 if (!aconnector) {
541 BUG_ON("Failed to find connector for link!");
542 return true;
543 }
544
545 mutex_lock(&aconnector->dm_dp_aux.aux.hw_mutex);
546 dp_sink_present = dc_link_is_dp_sink_present(link);
547 mutex_unlock(&aconnector->dm_dp_aux.aux.hw_mutex);
548 return dp_sink_present;
549}
550
/*
 * Read the sink's EDID into sink->dc_edid and parse it into
 * sink->edid_caps, retrying up to three times on checksum failures
 * (some dongles return a corrupt EDID on the first read).
 *
 * Also services DP compliance tests 4.2.2.6 and 4.2.2.3 by echoing the
 * real EDID checksum back over AUX.
 */
enum dc_edid_status dm_helpers_read_local_edid(
		struct dc_context *ctx,
		struct dc_link *link,
		struct dc_sink *sink)
{
	struct amdgpu_dm_connector *aconnector = link->priv;
	struct drm_connector *connector = &aconnector->base;
	struct i2c_adapter *ddc;
	int retry = 3;
	enum dc_edid_status edid_status;
	struct edid *edid;

	/* AUX-capable links read EDID over DPCD, others over plain I2C. */
	if (link->aux_mode)
		ddc = &aconnector->dm_dp_aux.aux.ddc;
	else
		ddc = &aconnector->i2c->base;

	/* some dongles read edid incorrectly the first time,
	 * do check sum and retry to make sure read correct edid.
	 */
	do {

		edid = drm_get_edid(&aconnector->base, ddc);

		/* DP Compliance Test 4.2.2.6 */
		if (link->aux_mode && connector->edid_corrupt)
			drm_dp_send_real_edid_checksum(&aconnector->dm_dp_aux.aux, connector->real_edid_checksum);

		if (!edid && connector->edid_corrupt) {
			connector->edid_corrupt = false;
			return EDID_BAD_CHECKSUM;
		}

		if (!edid)
			return EDID_NO_RESPONSE;

		sink->dc_edid.length = EDID_LENGTH * (edid->extensions + 1);
		memmove(sink->dc_edid.raw_edid, (uint8_t *)edid, sink->dc_edid.length);

		/* We don't need the original edid anymore */
		kfree(edid);

		/* connector->display_info will be parsed from EDID and saved
		 * into drm_connector->display_info from edid by call stack
		 * below:
		 * drm_parse_ycbcr420_deep_color_info
		 * drm_parse_hdmi_forum_vsdb
		 * drm_parse_cea_ext
		 * drm_add_display_info
		 * drm_connector_update_edid_property
		 *
		 * drm_connector->display_info will be used by amdgpu_dm funcs,
		 * like fill_stream_properties_from_drm_display_mode
		 */
		amdgpu_dm_update_connector_after_detect(aconnector);

		edid_status = dm_helpers_parse_edid_caps(
						ctx,
						&sink->dc_edid,
						&sink->edid_caps);

	} while (edid_status == EDID_BAD_CHECKSUM && --retry > 0);

	if (edid_status != EDID_OK)
		DRM_ERROR("EDID err: %d, on connector: %s",
				edid_status,
				aconnector->base.name);

	/* DP Compliance Test 4.2.2.3: echo the last block's checksum byte. */
	if (link->aux_mode)
		drm_dp_send_real_edid_checksum(&aconnector->dm_dp_aux.aux, sink->dc_edid.raw_edid[sink->dc_edid.length-1]);

	return edid_status;
}
625
/* Stub: DCN clock programming not implemented here yet. */
void dm_set_dcn_clocks(struct dc_context *ctx, struct dc_clocks *clks)
{
	/* TODO: something */
}
630#ifdef CONFIG_DRM_AMD_DC_DCN3_0
631
/*
 * Stub: GPU memory allocation for DC is not implemented yet.
 * Always returns NULL and leaves *addr untouched — callers must cope.
 */
void *dm_helpers_allocate_gpu_mem(
		struct dc_context *ctx,
		enum dc_gpu_mem_alloc_type type,
		size_t size,
		long long *addr)
{
	// TODO
	return NULL;
}
641
/*
 * Stub: paired with dm_helpers_allocate_gpu_mem, which never allocates,
 * so there is nothing to free yet.
 */
void dm_helpers_free_gpu_mem(
		struct dc_context *ctx,
		enum dc_gpu_mem_alloc_type type,
		void *pvMem)
{
	// TODO
}
649#endif
1/*
2 * Copyright 2015 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: AMD
23 *
24 */
25
26#include <linux/string.h>
27#include <linux/acpi.h>
28#include <linux/version.h>
29#include <linux/i2c.h>
30
31#include <drm/drm_probe_helper.h>
32#include <drm/amdgpu_drm.h>
33#include <drm/drm_edid.h>
34
35#include "dm_services.h"
36#include "amdgpu.h"
37#include "dc.h"
38#include "amdgpu_dm.h"
39#include "amdgpu_dm_irq.h"
40
41#include "dm_helpers.h"
42
43/* dm_helpers_parse_edid_caps
44 *
45 * Parse edid caps
46 *
47 * @edid: [in] pointer to edid
48 * edid_caps: [in] pointer to edid caps
49 * @return
50 * void
51 * */
52enum dc_edid_status dm_helpers_parse_edid_caps(
53 struct dc_context *ctx,
54 const struct dc_edid *edid,
55 struct dc_edid_caps *edid_caps)
56{
57 struct edid *edid_buf = (struct edid *) edid->raw_edid;
58 struct cea_sad *sads;
59 int sad_count = -1;
60 int sadb_count = -1;
61 int i = 0;
62 int j = 0;
63 uint8_t *sadb = NULL;
64
65 enum dc_edid_status result = EDID_OK;
66
67 if (!edid_caps || !edid)
68 return EDID_BAD_INPUT;
69
70 if (!drm_edid_is_valid(edid_buf))
71 result = EDID_BAD_CHECKSUM;
72
73 edid_caps->manufacturer_id = (uint16_t) edid_buf->mfg_id[0] |
74 ((uint16_t) edid_buf->mfg_id[1])<<8;
75 edid_caps->product_id = (uint16_t) edid_buf->prod_code[0] |
76 ((uint16_t) edid_buf->prod_code[1])<<8;
77 edid_caps->serial_number = edid_buf->serial;
78 edid_caps->manufacture_week = edid_buf->mfg_week;
79 edid_caps->manufacture_year = edid_buf->mfg_year;
80
81 /* One of the four detailed_timings stores the monitor name. It's
82 * stored in an array of length 13. */
83 for (i = 0; i < 4; i++) {
84 if (edid_buf->detailed_timings[i].data.other_data.type == 0xfc) {
85 while (j < 13 && edid_buf->detailed_timings[i].data.other_data.data.str.str[j]) {
86 if (edid_buf->detailed_timings[i].data.other_data.data.str.str[j] == '\n')
87 break;
88
89 edid_caps->display_name[j] =
90 edid_buf->detailed_timings[i].data.other_data.data.str.str[j];
91 j++;
92 }
93 }
94 }
95
96 edid_caps->edid_hdmi = drm_detect_hdmi_monitor(
97 (struct edid *) edid->raw_edid);
98
99 sad_count = drm_edid_to_sad((struct edid *) edid->raw_edid, &sads);
100 if (sad_count <= 0) {
101 DRM_INFO("SADs count is: %d, don't need to read it\n",
102 sad_count);
103 return result;
104 }
105
106 edid_caps->audio_mode_count = sad_count < DC_MAX_AUDIO_DESC_COUNT ? sad_count : DC_MAX_AUDIO_DESC_COUNT;
107 for (i = 0; i < edid_caps->audio_mode_count; ++i) {
108 struct cea_sad *sad = &sads[i];
109
110 edid_caps->audio_modes[i].format_code = sad->format;
111 edid_caps->audio_modes[i].channel_count = sad->channels + 1;
112 edid_caps->audio_modes[i].sample_rate = sad->freq;
113 edid_caps->audio_modes[i].sample_size = sad->byte2;
114 }
115
116 sadb_count = drm_edid_to_speaker_allocation((struct edid *) edid->raw_edid, &sadb);
117
118 if (sadb_count < 0) {
119 DRM_ERROR("Couldn't read Speaker Allocation Data Block: %d\n", sadb_count);
120 sadb_count = 0;
121 }
122
123 if (sadb_count)
124 edid_caps->speaker_flags = sadb[0];
125 else
126 edid_caps->speaker_flags = DEFAULT_SPEAKER_LOCATION;
127
128 kfree(sads);
129 kfree(sadb);
130
131 return result;
132}
133
/*
 * Copy the active DRM MST payload table into DC's stream allocation table.
 *
 * DC must program the ASIC with the same slot 1-63 ordering DRM negotiated
 * with the branch device, so this walks mst_mgr->payloads in order under
 * payload_lock and mirrors LOCAL/REMOTE entries into proposed_table.
 */
static void get_payload_table(
		struct amdgpu_dm_connector *aconnector,
		struct dp_mst_stream_allocation_table *proposed_table)
{
	int i;
	struct drm_dp_mst_topology_mgr *mst_mgr =
			&aconnector->mst_port->mst_mgr;

	mutex_lock(&mst_mgr->payload_lock);

	proposed_table->stream_count = 0;

	/* number of active streams */
	for (i = 0; i < mst_mgr->max_payloads; i++) {
		if (mst_mgr->payloads[i].num_slots == 0)
			break; /* end of vcp_id table */

		/* DELETE_LOCAL entries should have been flushed already. */
		ASSERT(mst_mgr->payloads[i].payload_state !=
				DP_PAYLOAD_DELETE_LOCAL);

		if (mst_mgr->payloads[i].payload_state == DP_PAYLOAD_LOCAL ||
			mst_mgr->payloads[i].payload_state ==
					DP_PAYLOAD_REMOTE) {

			struct dp_mst_stream_allocation *sa =
					&proposed_table->stream_allocations[
						proposed_table->stream_count];

			sa->slot_count = mst_mgr->payloads[i].num_slots;
			sa->vcp_id = mst_mgr->proposed_vcpis[i]->vcpi;
			proposed_table->stream_count++;
		}
	}

	mutex_unlock(&mst_mgr->payload_lock);
}
170
/* Stub: no DP branch-device info needs updating on amdgpu. */
void dm_helpers_dp_update_branch_info(
	struct dc_context *ctx,
	const struct dc_link *link)
{}
175
176/*
177 * Writes payload allocation table in immediate downstream device.
178 */
/*
 * Writes payload allocation table in immediate downstream device.
 *
 * On enable: derives PBN from the stream's pixel clock and color depth,
 * finds/allocates the DRM VCPI slots, runs part 1 of the payload
 * programming, and mirrors the resulting DRM payload table into
 * proposed_table for DC.
 *
 * NOTE(review): get_payload_table() is called even when part 1 failed,
 * and only then is the failure reported — presumably intentional so DC
 * still sees the current table; confirm before reordering.
 */
bool dm_helpers_dp_mst_write_payload_allocation_table(
		struct dc_context *ctx,
		const struct dc_stream_state *stream,
		struct dp_mst_stream_allocation_table *proposed_table,
		bool enable)
{
	struct amdgpu_dm_connector *aconnector;
	struct drm_dp_mst_topology_mgr *mst_mgr;
	struct drm_dp_mst_port *mst_port;
	int slots = 0;
	bool ret;
	int clock;
	int bpp = 0;
	int pbn = 0;

	aconnector = (struct amdgpu_dm_connector *)stream->dm_stream_context;

	if (!aconnector || !aconnector->mst_port)
		return false;

	mst_mgr = &aconnector->mst_port->mst_mgr;

	if (!mst_mgr->mst_state)
		return false;

	mst_port = aconnector->port;

	if (enable) {
		/* pix_clk_100hz is in units of 100 Hz; convert to kHz. */
		clock = stream->timing.pix_clk_100hz / 10;

		switch (stream->timing.display_color_depth) {

		case COLOR_DEPTH_666:
			bpp = 6;
			break;
		case COLOR_DEPTH_888:
			bpp = 8;
			break;
		case COLOR_DEPTH_101010:
			bpp = 10;
			break;
		case COLOR_DEPTH_121212:
			bpp = 12;
			break;
		case COLOR_DEPTH_141414:
			bpp = 14;
			break;
		case COLOR_DEPTH_161616:
			bpp = 16;
			break;
		default:
			ASSERT(bpp != 0);
			break;
		}

		/* Three color components per pixel. */
		bpp = bpp * 3;

		/* TODO need to know link rate */

		pbn = drm_dp_calc_pbn_mode(clock, bpp);

		slots = drm_dp_find_vcpi_slots(mst_mgr, pbn);
		ret = drm_dp_mst_allocate_vcpi(mst_mgr, mst_port, pbn, slots);

		if (!ret)
			return false;

	} else {
		drm_dp_mst_reset_vcpi_slots(mst_mgr, mst_port);
	}

	ret = drm_dp_update_payload_part1(mst_mgr);

	/* mst_mgr->payloads are VC payload notify MST branch using DPCD or
	 * AUX message. The sequence is slot 1-63 allocated sequence for each
	 * stream. AMD ASIC stream slot allocation should follow the same
	 * sequence. copy DRM MST allocation to dc */

	get_payload_table(aconnector, proposed_table);

	if (ret)
		return false;

	return true;
}
264
265/*
266 * poll pending down reply
267 */
/*
 * poll pending down reply
 *
 * Stub: down-reply polling is handled by the DRM MST core on amdgpu.
 */
void dm_helpers_dp_mst_poll_pending_down_reply(
	struct dc_context *ctx,
	const struct dc_link *link)
{}
272
273/*
274 * Clear payload allocation table before enable MST DP link.
275 */
/*
 * Clear payload allocation table before enable MST DP link.
 *
 * Stub: the DRM MST core manages payload table lifetime on amdgpu.
 */
void dm_helpers_dp_mst_clear_payload_allocation_table(
	struct dc_context *ctx,
	const struct dc_link *link)
{}
280
281/*
282 * Polls for ACT (allocation change trigger) handled and sends
283 * ALLOCATE_PAYLOAD message.
284 */
285bool dm_helpers_dp_mst_poll_for_allocation_change_trigger(
286 struct dc_context *ctx,
287 const struct dc_stream_state *stream)
288{
289 struct amdgpu_dm_connector *aconnector;
290 struct drm_dp_mst_topology_mgr *mst_mgr;
291 int ret;
292
293 aconnector = (struct amdgpu_dm_connector *)stream->dm_stream_context;
294
295 if (!aconnector || !aconnector->mst_port)
296 return false;
297
298 mst_mgr = &aconnector->mst_port->mst_mgr;
299
300 if (!mst_mgr->mst_state)
301 return false;
302
303 ret = drm_dp_check_act_status(mst_mgr);
304
305 if (ret)
306 return false;
307
308 return true;
309}
310
311bool dm_helpers_dp_mst_send_payload_allocation(
312 struct dc_context *ctx,
313 const struct dc_stream_state *stream,
314 bool enable)
315{
316 struct amdgpu_dm_connector *aconnector;
317 struct drm_dp_mst_topology_mgr *mst_mgr;
318 struct drm_dp_mst_port *mst_port;
319 int ret;
320
321 aconnector = (struct amdgpu_dm_connector *)stream->dm_stream_context;
322
323 if (!aconnector || !aconnector->mst_port)
324 return false;
325
326 mst_port = aconnector->port;
327
328 mst_mgr = &aconnector->mst_port->mst_mgr;
329
330 if (!mst_mgr->mst_state)
331 return false;
332
333 ret = drm_dp_update_payload_part2(mst_mgr);
334
335 if (ret)
336 return false;
337
338 if (!enable)
339 drm_dp_mst_deallocate_vcpi(mst_mgr, mst_port);
340
341 return true;
342}
343
/* Emit the DTN log opening marker, to the log buffer or to dmesg. */
void dm_dtn_log_begin(struct dc_context *ctx,
		      struct dc_log_buffer_ctx *log_ctx)
{
	static const char msg[] = "[dtn begin]\n";

	if (log_ctx)
		dm_dtn_log_append_v(ctx, log_ctx, "%s", msg);
	else
		pr_info("%s", msg);
}
356
/*
 * Append a printf-formatted message to a DTN log buffer, growing the
 * buffer as needed.
 *
 * @ctx: DC context (unused)
 * @log_ctx: buffer state (buf/pos/size); when NULL the message goes to
 *           dmesg via %pV instead
 * @msg: printf-style format string followed by its arguments
 */
void dm_dtn_log_append_v(struct dc_context *ctx,
		struct dc_log_buffer_ctx *log_ctx,
		const char *msg, ...)
{
	va_list args;
	size_t total;
	int n;

	if (!log_ctx) {
		/* No context, redirect to dmesg. */
		struct va_format vaf;

		vaf.fmt = msg;
		vaf.va = &args;

		va_start(args, msg);
		pr_info("%pV", &vaf);
		va_end(args);

		return;
	}

	/* Measure the output. Each vsnprintf pass gets its own
	 * va_start/va_end since a va_list may not be reused. */
	va_start(args, msg);
	n = vsnprintf(NULL, 0, msg, args);
	va_end(args);

	if (n <= 0)
		return;

	/* Reallocate the string buffer as needed (+1 for the NUL). */
	total = log_ctx->pos + n + 1;

	if (total > log_ctx->size) {
		char *buf = (char *)kvcalloc(total, sizeof(char), GFP_KERNEL);

		if (buf) {
			memcpy(buf, log_ctx->buf, log_ctx->pos);
			kfree(log_ctx->buf);

			log_ctx->buf = buf;
			log_ctx->size = total;
		}
		/* On allocation failure, fall through: vscnprintf below
		 * truncates to whatever space the old buffer has. */
	}

	if (!log_ctx->buf)
		return;

	/* Write the formatted string to the log buffer. */
	va_start(args, msg);
	n = vscnprintf(
		log_ctx->buf + log_ctx->pos,
		log_ctx->size - log_ctx->pos,
		msg,
		args);
	va_end(args);

	if (n > 0)
		log_ctx->pos += n;
}
417
/* Emit the DTN log closing marker, to the log buffer or to dmesg. */
void dm_dtn_log_end(struct dc_context *ctx,
		    struct dc_log_buffer_ctx *log_ctx)
{
	static const char msg[] = "[dtn end]\n";

	if (log_ctx)
		dm_dtn_log_append_v(ctx, log_ctx, "%s", msg);
	else
		pr_info("%s", msg);
}
430
431bool dm_helpers_dp_mst_start_top_mgr(
432 struct dc_context *ctx,
433 const struct dc_link *link,
434 bool boot)
435{
436 struct amdgpu_dm_connector *aconnector = link->priv;
437
438 if (!aconnector) {
439 DRM_ERROR("Failed to found connector for link!");
440 return false;
441 }
442
443 if (boot) {
444 DRM_INFO("DM_MST: Differing MST start on aconnector: %p [id: %d]\n",
445 aconnector, aconnector->base.base.id);
446 return true;
447 }
448
449 DRM_INFO("DM_MST: starting TM on aconnector: %p [id: %d]\n",
450 aconnector, aconnector->base.base.id);
451
452 return (drm_dp_mst_topology_mgr_set_mst(&aconnector->mst_mgr, true) == 0);
453}
454
455void dm_helpers_dp_mst_stop_top_mgr(
456 struct dc_context *ctx,
457 const struct dc_link *link)
458{
459 struct amdgpu_dm_connector *aconnector = link->priv;
460
461 if (!aconnector) {
462 DRM_ERROR("Failed to found connector for link!");
463 return;
464 }
465
466 DRM_INFO("DM_MST: stopping TM on aconnector: %p [id: %d]\n",
467 aconnector, aconnector->base.base.id);
468
469 if (aconnector->mst_mgr.mst_state == true)
470 drm_dp_mst_topology_mgr_set_mst(&aconnector->mst_mgr, false);
471}
472
473bool dm_helpers_dp_read_dpcd(
474 struct dc_context *ctx,
475 const struct dc_link *link,
476 uint32_t address,
477 uint8_t *data,
478 uint32_t size)
479{
480
481 struct amdgpu_dm_connector *aconnector = link->priv;
482
483 if (!aconnector) {
484 DRM_ERROR("Failed to found connector for link!");
485 return false;
486 }
487
488 return drm_dp_dpcd_read(&aconnector->dm_dp_aux.aux, address,
489 data, size) > 0;
490}
491
492bool dm_helpers_dp_write_dpcd(
493 struct dc_context *ctx,
494 const struct dc_link *link,
495 uint32_t address,
496 const uint8_t *data,
497 uint32_t size)
498{
499 struct amdgpu_dm_connector *aconnector = link->priv;
500
501 if (!aconnector) {
502 DRM_ERROR("Failed to found connector for link!");
503 return false;
504 }
505
506 return drm_dp_dpcd_write(&aconnector->dm_dp_aux.aux,
507 address, (uint8_t *)data, size) > 0;
508}
509
510bool dm_helpers_submit_i2c(
511 struct dc_context *ctx,
512 const struct dc_link *link,
513 struct i2c_command *cmd)
514{
515 struct amdgpu_dm_connector *aconnector = link->priv;
516 struct i2c_msg *msgs;
517 int i = 0;
518 int num = cmd->number_of_payloads;
519 bool result;
520
521 if (!aconnector) {
522 DRM_ERROR("Failed to found connector for link!");
523 return false;
524 }
525
526 msgs = kcalloc(num, sizeof(struct i2c_msg), GFP_KERNEL);
527
528 if (!msgs)
529 return false;
530
531 for (i = 0; i < num; i++) {
532 msgs[i].flags = cmd->payloads[i].write ? 0 : I2C_M_RD;
533 msgs[i].addr = cmd->payloads[i].address;
534 msgs[i].len = cmd->payloads[i].length;
535 msgs[i].buf = cmd->payloads[i].data;
536 }
537
538 result = i2c_transfer(&aconnector->i2c->base, msgs, num) == num;
539
540 kfree(msgs);
541
542 return result;
543}
#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
/*
 * Write DP_DSC_ENABLE in the sink's DPCD to turn DSC on or off.
 * Returns false when the stream or its sink is missing, otherwise the
 * result of the DPCD write.
 */
bool dm_helpers_dp_write_dsc_enable(
		struct dc_context *ctx,
		const struct dc_stream_state *stream,
		bool enable
)
{
	uint8_t enable_dsc = enable ? 1 : 0;

	/* Guard the stream->sink->link chain: it was dereferenced
	 * unchecked before. */
	if (!stream || !stream->sink)
		return false;

	return dm_helpers_dp_write_dpcd(ctx, stream->sink->link, DP_DSC_ENABLE, &enable_dsc, 1);
}
#endif
556
557bool dm_helpers_is_dp_sink_present(struct dc_link *link)
558{
559 bool dp_sink_present;
560 struct amdgpu_dm_connector *aconnector = link->priv;
561
562 if (!aconnector) {
563 BUG_ON("Failed to found connector for link!");
564 return true;
565 }
566
567 mutex_lock(&aconnector->dm_dp_aux.aux.hw_mutex);
568 dp_sink_present = dc_link_is_dp_sink_present(link);
569 mutex_unlock(&aconnector->dm_dp_aux.aux.hw_mutex);
570 return dp_sink_present;
571}
572
/*
 * Read the sink's EDID into sink->dc_edid and parse it into
 * sink->edid_caps, retrying up to three times on checksum failures
 * (some dongles return a corrupt EDID on the first read).
 *
 * Afterwards, when the sink requested an EDID-read test over AUX
 * (DP_TEST_REQUEST), write the last block's checksum and acknowledge
 * with EDID_CHECKSUM_WRITE in DP_TEST_RESPONSE.
 */
enum dc_edid_status dm_helpers_read_local_edid(
		struct dc_context *ctx,
		struct dc_link *link,
		struct dc_sink *sink)
{
	struct amdgpu_dm_connector *aconnector = link->priv;
	struct i2c_adapter *ddc;
	int retry = 3;
	enum dc_edid_status edid_status;
	struct edid *edid;

	/* AUX-capable links read EDID over DPCD, others over plain I2C. */
	if (link->aux_mode)
		ddc = &aconnector->dm_dp_aux.aux.ddc;
	else
		ddc = &aconnector->i2c->base;

	/* some dongles read edid incorrectly the first time,
	 * do check sum and retry to make sure read correct edid.
	 */
	do {

		edid = drm_get_edid(&aconnector->base, ddc);

		if (!edid)
			return EDID_NO_RESPONSE;

		sink->dc_edid.length = EDID_LENGTH * (edid->extensions + 1);
		memmove(sink->dc_edid.raw_edid, (uint8_t *)edid, sink->dc_edid.length);

		/* We don't need the original edid anymore */
		kfree(edid);

		edid_status = dm_helpers_parse_edid_caps(
						ctx,
						&sink->dc_edid,
						&sink->edid_caps);

	} while (edid_status == EDID_BAD_CHECKSUM && --retry > 0);

	if (edid_status != EDID_OK)
		DRM_ERROR("EDID err: %d, on connector: %s",
				edid_status,
				aconnector->base.name);
	if (link->aux_mode) {
		union test_request test_request = { {0} };
		union test_response test_response = { {0} };

		dm_helpers_dp_read_dpcd(ctx,
					link,
					DP_TEST_REQUEST,
					&test_request.raw,
					sizeof(union test_request));

		if (!test_request.bits.EDID_READ)
			return edid_status;

		test_response.bits.EDID_CHECKSUM_WRITE = 1;

		/* Echo the checksum byte of the last EDID block. */
		dm_helpers_dp_write_dpcd(ctx,
					link,
					DP_TEST_EDID_CHECKSUM,
					&sink->dc_edid.raw_edid[sink->dc_edid.length-1],
					1);

		dm_helpers_dp_write_dpcd(ctx,
					link,
					DP_TEST_RESPONSE,
					&test_response.raw,
					sizeof(test_response));

	}

	return edid_status;
}
647
/* Stub: DCN clock programming not implemented here yet. */
void dm_set_dcn_clocks(struct dc_context *ctx, struct dc_clocks *clks)
{
	/* TODO: something */
}