   1/* SPDX-License-Identifier: MIT */
   2/*
   3 * Copyright (C) 2017 Google, Inc.
   4 * Copyright (C) 2017-2019, Intel Corporation.
   5 *
   6 * Authors:
   7 * Sean Paul <seanpaul@chromium.org>
   8 * Ramalingam C <ramalingam.c@intel.com>
   9 */
  10
  11#include <linux/component.h>
  12#include <linux/i2c.h>
  13#include <linux/random.h>
  14
  15#include <drm/display/drm_hdcp_helper.h>
  16#include <drm/intel/i915_component.h>
  17
  18#include "i915_drv.h"
  19#include "i915_reg.h"
  20#include "intel_connector.h"
  21#include "intel_de.h"
  22#include "intel_display_power.h"
  23#include "intel_display_power_well.h"
  24#include "intel_display_types.h"
  25#include "intel_hdcp.h"
  26#include "intel_hdcp_gsc.h"
  27#include "intel_hdcp_regs.h"
  28#include "intel_hdcp_shim.h"
  29#include "intel_pcode.h"
  30
  31#define KEY_LOAD_TRIES	5
  32#define HDCP2_LC_RETRY_CNT			3
  33
  34/* WA: 16022217614 */
  35static void
  36intel_hdcp_disable_hdcp_line_rekeying(struct intel_encoder *encoder,
  37				      struct intel_hdcp *hdcp)
  38{
  39	struct intel_display *display = to_intel_display(encoder);
  40
  41	/* Here we assume HDMI is in TMDS mode of operation */
  42	if (encoder->type != INTEL_OUTPUT_HDMI)
  43		return;
  44
  45	if (DISPLAY_VER(display) >= 14) {
  46		if (IS_DISPLAY_VERx100_STEP(display, 1400, STEP_D0, STEP_FOREVER))
  47			intel_de_rmw(display, MTL_CHICKEN_TRANS(hdcp->cpu_transcoder),
  48				     0, HDCP_LINE_REKEY_DISABLE);
  49		else if (IS_DISPLAY_VERx100_STEP(display, 1401, STEP_B0, STEP_FOREVER) ||
  50			 IS_DISPLAY_VERx100_STEP(display, 2000, STEP_B0, STEP_FOREVER))
  51			intel_de_rmw(display,
  52				     TRANS_DDI_FUNC_CTL(display, hdcp->cpu_transcoder),
  53				     0, TRANS_DDI_HDCP_LINE_REKEY_DISABLE);
  54	}
  55}
  56
  57static int intel_conn_to_vcpi(struct intel_atomic_state *state,
  58			      struct intel_connector *connector)
  59{
  60	struct drm_dp_mst_topology_mgr *mgr;
  61	struct drm_dp_mst_atomic_payload *payload;
  62	struct drm_dp_mst_topology_state *mst_state;
  63	int vcpi = 0;
  64
  65	/* For HDMI this is forced to be 0x0. For DP SST also this is 0x0. */
  66	if (!connector->port)
  67		return 0;
  68	mgr = connector->port->mgr;
  69
  70	drm_modeset_lock(&mgr->base.lock, state->base.acquire_ctx);
  71	mst_state = to_drm_dp_mst_topology_state(mgr->base.state);
  72	payload = drm_atomic_get_mst_payload_state(mst_state, connector->port);
  73	if (drm_WARN_ON(mgr->dev, !payload))
  74		goto out;
  75
  76	vcpi = payload->vcpi;
  77	if (drm_WARN_ON(mgr->dev, vcpi < 0)) {
  78		vcpi = 0;
  79		goto out;
  80	}
  81out:
  82	return vcpi;
  83}
  84
  85/*
  86 * intel_hdcp_required_content_stream selects the highest common possible HDCP
  87 * content_type for all streams in the DP MST topology, because the security f/w
  88 * doesn't have any provision to mark the content_type for each stream
  89 * separately; it marks all available streams with the content_type provided at
  90 * the time of port authentication. This may prevent userspace from using type1
  91 * content on an HDCP 2.2 capable sink when other sinks in the DP MST topology
  92 * are not HDCP 2.2 capable. Though it is not compulsory, the security fw should
  93 * change its policy to mark different content_types for different streams.
  94 */
  95static int
  96intel_hdcp_required_content_stream(struct intel_atomic_state *state,
  97				   struct intel_digital_port *dig_port)
  98{
  99	struct intel_display *display = to_intel_display(state);
 100	struct drm_connector_list_iter conn_iter;
 101	struct intel_digital_port *conn_dig_port;
 102	struct intel_connector *connector;
 103	struct hdcp_port_data *data = &dig_port->hdcp_port_data;
 104	bool enforce_type0 = false;
 105	int k;
 106
 107	if (dig_port->hdcp_auth_status)
 108		return 0;
 109
 110	data->k = 0;
 111
 112	if (!dig_port->hdcp_mst_type1_capable)
 113		enforce_type0 = true;
 114
 115	drm_connector_list_iter_begin(display->drm, &conn_iter);
 116	for_each_intel_connector_iter(connector, &conn_iter) {
 117		if (connector->base.status == connector_status_disconnected)
 118			continue;
 119
 120		if (!intel_encoder_is_mst(intel_attached_encoder(connector)))
 121			continue;
 122
 123		conn_dig_port = intel_attached_dig_port(connector);
 124		if (conn_dig_port != dig_port)
 125			continue;
 126
 127		data->streams[data->k].stream_id =
 128			intel_conn_to_vcpi(state, connector);
 129		data->k++;
 130
 131		/* if there is only one active stream */
 132		if (dig_port->dp.active_mst_links <= 1)
 133			break;
 134	}
 135	drm_connector_list_iter_end(&conn_iter);
 136
 137	if (drm_WARN_ON(display->drm, data->k > INTEL_NUM_PIPES(display) || data->k == 0))
 138		return -EINVAL;
 139
 140	/*
 141	 * Apply common protection level across all streams in DP MST Topology.
 142	 * Use highest supported content type for all streams in DP MST Topology.
 143	 */
 144	for (k = 0; k < data->k; k++)
 145		data->streams[k].stream_type =
 146			enforce_type0 ? DRM_MODE_HDCP_CONTENT_TYPE0 : DRM_MODE_HDCP_CONTENT_TYPE1;
 147
 148	return 0;
 149}
 150
 151static int intel_hdcp_prepare_streams(struct intel_atomic_state *state,
 152				      struct intel_connector *connector)
 153{
 154	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
 155	struct hdcp_port_data *data = &dig_port->hdcp_port_data;
 156	struct intel_hdcp *hdcp = &connector->hdcp;
 157
 158	if (intel_encoder_is_mst(intel_attached_encoder(connector)))
 159		return intel_hdcp_required_content_stream(state, dig_port);
 160
 161	data->k = 1;
 162	data->streams[0].stream_id = 0;
 163	data->streams[0].stream_type = hdcp->content_type;
 164
 165	return 0;
 166}
 167
 168static
 169bool intel_hdcp_is_ksv_valid(u8 *ksv)
 170{
 171	int i, ones = 0;
 172	/* KSV has 20 1's and 20 0's */
 173	for (i = 0; i < DRM_HDCP_KSV_LEN; i++)
 174		ones += hweight8(ksv[i]);
 175	if (ones != 20)
 176		return false;
 177
 178	return true;
 179}
 180
 181static
 182int intel_hdcp_read_valid_bksv(struct intel_digital_port *dig_port,
 183			       const struct intel_hdcp_shim *shim, u8 *bksv)
 184{
 185	struct intel_display *display = to_intel_display(dig_port);
 186	int ret, i, tries = 2;
 187
 188	/* HDCP spec states that we must retry the bksv if it is invalid */
 189	for (i = 0; i < tries; i++) {
 190		ret = shim->read_bksv(dig_port, bksv);
 191		if (ret)
 192			return ret;
 193		if (intel_hdcp_is_ksv_valid(bksv))
 194			break;
 195	}
 196	if (i == tries) {
 197		drm_dbg_kms(display->drm, "Bksv is invalid\n");
 198		return -ENODEV;
 199	}
 200
 201	return 0;
 202}
 203
 204/* Is HDCP1.4 capable on Platform and Sink */
 205bool intel_hdcp_get_capability(struct intel_connector *connector)
 206{
 207	struct intel_digital_port *dig_port;
 208	const struct intel_hdcp_shim *shim = connector->hdcp.shim;
 209	bool capable = false;
 210	u8 bksv[5];
 211
 212	if (!intel_attached_encoder(connector))
 213		return capable;
 214
 215	dig_port = intel_attached_dig_port(connector);
 216
 217	if (!shim)
 218		return capable;
 219
 220	if (shim->hdcp_get_capability) {
 221		shim->hdcp_get_capability(dig_port, &capable);
 222	} else {
 223		if (!intel_hdcp_read_valid_bksv(dig_port, shim, bksv))
 224			capable = true;
 225	}
 226
 227	return capable;
 228}
 229
 230/*
 231 * Check if the source has all the building blocks ready to make
 232 * HDCP 2.2 work
 233 */
 234static bool intel_hdcp2_prerequisite(struct intel_connector *connector)
 235{
 236	struct intel_display *display = to_intel_display(connector);
 237	struct intel_hdcp *hdcp = &connector->hdcp;
 238
 239	/* I915 support for HDCP2.2 */
 240	if (!hdcp->hdcp2_supported)
 241		return false;
 242
 243	/* On MTL+, make sure the GSC is loaded and the proxy is set up */
 244	if (intel_hdcp_gsc_cs_required(display)) {
 245		if (!intel_hdcp_gsc_check_status(display))
 246			return false;
 247	}
 248
 249	/* The MEI or GSC interface (whichever is used) must be bound and ready */
 250	mutex_lock(&display->hdcp.hdcp_mutex);
 251	if (!display->hdcp.comp_added || !display->hdcp.arbiter) {
 252		mutex_unlock(&display->hdcp.hdcp_mutex);
 253		return false;
 254	}
 255	mutex_unlock(&display->hdcp.hdcp_mutex);
 256
 257	return true;
 258}
 259
 260/* Is HDCP2.2 capable on Platform and Sink */
 261bool intel_hdcp2_get_capability(struct intel_connector *connector)
 262{
 263	struct intel_hdcp *hdcp = &connector->hdcp;
 264	bool capable = false;
 265
 266	if (!intel_hdcp2_prerequisite(connector))
 267		return false;
 268
 269	/* Sink's capability for HDCP2.2 */
 270	hdcp->shim->hdcp_2_2_get_capability(connector, &capable);
 271
 272	return capable;
 273}
 274
 275void intel_hdcp_get_remote_capability(struct intel_connector *connector,
 276				      bool *hdcp_capable,
 277				      bool *hdcp2_capable)
 278{
 279	struct intel_hdcp *hdcp = &connector->hdcp;
 280
 281	if (!hdcp->shim->get_remote_hdcp_capability)
 282		return;
 283
 284	hdcp->shim->get_remote_hdcp_capability(connector, hdcp_capable,
 285					       hdcp2_capable);
 286
 287	if (!intel_hdcp2_prerequisite(connector))
 288		*hdcp2_capable = false;
 289}
 290
 291static bool intel_hdcp_in_use(struct intel_display *display,
 292			      enum transcoder cpu_transcoder, enum port port)
 293{
 294	return intel_de_read(display,
 295			     HDCP_STATUS(display, cpu_transcoder, port)) &
 296		HDCP_STATUS_ENC;
 297}
 298
 299static bool intel_hdcp2_in_use(struct intel_display *display,
 300			       enum transcoder cpu_transcoder, enum port port)
 301{
 302	return intel_de_read(display,
 303			     HDCP2_STATUS(display, cpu_transcoder, port)) &
 304		LINK_ENCRYPTION_STATUS;
 305}
 306
 307static int intel_hdcp_poll_ksv_fifo(struct intel_digital_port *dig_port,
 308				    const struct intel_hdcp_shim *shim)
 309{
 310	int ret, read_ret;
 311	bool ksv_ready;
 312
 313	/* Poll for ksv list ready (spec says max time allowed is 5s) */
 314	ret = __wait_for(read_ret = shim->read_ksv_ready(dig_port,
 315							 &ksv_ready),
 316			 read_ret || ksv_ready, 5 * 1000 * 1000, 1000,
 317			 100 * 1000);
 318	if (ret)
 319		return ret;
 320	if (read_ret)
 321		return read_ret;
 322	if (!ksv_ready)
 323		return -ETIMEDOUT;
 324
 325	return 0;
 326}
 327
 328static bool hdcp_key_loadable(struct intel_display *display)
 329{
 330	struct drm_i915_private *i915 = to_i915(display->drm);
 331	enum i915_power_well_id id;
 332	intel_wakeref_t wakeref;
 333	bool enabled = false;
 334
 335	/*
 336	 * On HSW and BDW, Display HW loads the Key as soon as Display resumes.
 337	 * On all BXT+, SW can load the keys only when the PW#1 is turned on.
 338	 */
 339	if (IS_HASWELL(i915) || IS_BROADWELL(i915))
 340		id = HSW_DISP_PW_GLOBAL;
 341	else
 342		id = SKL_DISP_PW_1;
 343
 344	/* PG1 (power well #1) needs to be enabled */
 345	with_intel_runtime_pm(&i915->runtime_pm, wakeref)
 346		enabled = intel_display_power_well_is_enabled(i915, id);
 347
 348	/*
 349	 * Another requirement for HDCP key loadability is that the PLL for
 350	 * cdclk is enabled. Without an active crtc we won't land here, so we
 351	 * assume that cdclk is already on.
 352	 */
 353
 354	return enabled;
 355}
 356
 357static void intel_hdcp_clear_keys(struct intel_display *display)
 358{
 359	intel_de_write(display, HDCP_KEY_CONF, HDCP_CLEAR_KEYS_TRIGGER);
 360	intel_de_write(display, HDCP_KEY_STATUS,
 361		       HDCP_KEY_LOAD_DONE | HDCP_KEY_LOAD_STATUS | HDCP_FUSE_IN_PROGRESS | HDCP_FUSE_ERROR | HDCP_FUSE_DONE);
 362}
 363
 364static int intel_hdcp_load_keys(struct intel_display *display)
 365{
 366	struct drm_i915_private *i915 = to_i915(display->drm);
 367	int ret;
 368	u32 val;
 369
 370	val = intel_de_read(display, HDCP_KEY_STATUS);
 371	if ((val & HDCP_KEY_LOAD_DONE) && (val & HDCP_KEY_LOAD_STATUS))
 372		return 0;
 373
 374	/*
 375	 * On HSW and BDW the HW loads the HDCP1.4 key when the display comes
 376	 * out of reset. So if the key is not already loaded, it's an error state.
 377	 */
 378	if (IS_HASWELL(i915) || IS_BROADWELL(i915))
 379		if (!(intel_de_read(display, HDCP_KEY_STATUS) & HDCP_KEY_LOAD_DONE))
 380			return -ENXIO;
 381
 382	/*
 383	 * Initiate loading the HDCP key from fuses.
 384	 *
 385	 * On BXT+ platforms the HDCP key needs to be loaded by SW. Only display
 386	 * version 9 platforms (minus BXT) differ in the key load trigger
 387	 * process from other platforms. These platforms use the GT Driver
 388	 * Mailbox interface.
 389	 */
 390	if (DISPLAY_VER(display) == 9 && !IS_BROXTON(i915)) {
 391		ret = snb_pcode_write(&i915->uncore, SKL_PCODE_LOAD_HDCP_KEYS, 1);
 392		if (ret) {
 393			drm_err(display->drm,
 394				"Failed to initiate HDCP key load (%d)\n",
 395				ret);
 396			return ret;
 397		}
 398	} else {
 399		intel_de_write(display, HDCP_KEY_CONF, HDCP_KEY_LOAD_TRIGGER);
 400	}
 401
 402	/* Wait for the keys to load (500us) */
 403	ret = intel_de_wait_custom(display, HDCP_KEY_STATUS,
 404				   HDCP_KEY_LOAD_DONE, HDCP_KEY_LOAD_DONE,
 405				   10, 1, &val);
 406	if (ret)
 407		return ret;
 408	else if (!(val & HDCP_KEY_LOAD_STATUS))
 409		return -ENXIO;
 410
 411	/* Send Aksv over to PCH display for use in authentication */
 412	intel_de_write(display, HDCP_KEY_CONF, HDCP_AKSV_SEND_TRIGGER);
 413
 414	return 0;
 415}
 416
 417/* Write one 32-bit chunk of SHA text and wait for the HW to accept it */
 418static int intel_write_sha_text(struct intel_display *display, u32 sha_text)
 419{
 420	intel_de_write(display, HDCP_SHA_TEXT, sha_text);
 421	if (intel_de_wait_for_set(display, HDCP_REP_CTL, HDCP_SHA1_READY, 1)) {
 422		drm_err(display->drm, "Timed out waiting for SHA1 ready\n");
 423		return -ETIMEDOUT;
 424	}
 425	return 0;
 426}
 427
 428static
 429u32 intel_hdcp_get_repeater_ctl(struct intel_display *display,
 430				enum transcoder cpu_transcoder, enum port port)
 431{
 432	if (DISPLAY_VER(display) >= 12) {
 433		switch (cpu_transcoder) {
 434		case TRANSCODER_A:
 435			return HDCP_TRANSA_REP_PRESENT |
 436			       HDCP_TRANSA_SHA1_M0;
 437		case TRANSCODER_B:
 438			return HDCP_TRANSB_REP_PRESENT |
 439			       HDCP_TRANSB_SHA1_M0;
 440		case TRANSCODER_C:
 441			return HDCP_TRANSC_REP_PRESENT |
 442			       HDCP_TRANSC_SHA1_M0;
 443		case TRANSCODER_D:
 444			return HDCP_TRANSD_REP_PRESENT |
 445			       HDCP_TRANSD_SHA1_M0;
 446		default:
 447			drm_err(display->drm, "Unknown transcoder %d\n",
 448				cpu_transcoder);
 449			return 0;
 450		}
 451	}
 452
 453	switch (port) {
 454	case PORT_A:
 455		return HDCP_DDIA_REP_PRESENT | HDCP_DDIA_SHA1_M0;
 456	case PORT_B:
 457		return HDCP_DDIB_REP_PRESENT | HDCP_DDIB_SHA1_M0;
 458	case PORT_C:
 459		return HDCP_DDIC_REP_PRESENT | HDCP_DDIC_SHA1_M0;
 460	case PORT_D:
 461		return HDCP_DDID_REP_PRESENT | HDCP_DDID_SHA1_M0;
 462	case PORT_E:
 463		return HDCP_DDIE_REP_PRESENT | HDCP_DDIE_SHA1_M0;
 464	default:
 465		drm_err(display->drm, "Unknown port %d\n", port);
 466		return 0;
 467	}
 468}
 469
 470static
 471int intel_hdcp_validate_v_prime(struct intel_connector *connector,
 472				const struct intel_hdcp_shim *shim,
 473				u8 *ksv_fifo, u8 num_downstream, u8 *bstatus)
 474{
 475	struct intel_display *display = to_intel_display(connector);
 476	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
 477	enum transcoder cpu_transcoder = connector->hdcp.cpu_transcoder;
 478	enum port port = dig_port->base.port;
 479	u32 vprime, sha_text, sha_leftovers, rep_ctl;
 480	int ret, i, j, sha_idx;
 481
 482	/* Process V' values from the receiver */
 483	for (i = 0; i < DRM_HDCP_V_PRIME_NUM_PARTS; i++) {
 484		ret = shim->read_v_prime_part(dig_port, i, &vprime);
 485		if (ret)
 486			return ret;
 487		intel_de_write(display, HDCP_SHA_V_PRIME(i), vprime);
 488	}
 489
 490	/*
 491	 * We need to write the concatenation of all device KSVs, BINFO (DP) ||
 492	 * BSTATUS (HDMI), and M0 (which is added via HDCP_REP_CTL). This byte
 493	 * stream is written via the HDCP_SHA_TEXT register in 32-bit
 494	 * increments. Every 64 bytes, we need to write HDCP_REP_CTL again. This
 495	 * index will keep track of our progress through the 64 bytes as well as
 496	 * helping us work the 40-bit KSVs through our 32-bit register.
 497	 *
 498	 * NOTE: data passed via HDCP_SHA_TEXT should be big-endian
 499	 */
 500	sha_idx = 0;
 501	sha_text = 0;
 502	sha_leftovers = 0;
 503	rep_ctl = intel_hdcp_get_repeater_ctl(display, cpu_transcoder, port);
 504	intel_de_write(display, HDCP_REP_CTL, rep_ctl | HDCP_SHA1_TEXT_32);
 505	for (i = 0; i < num_downstream; i++) {
 506		unsigned int sha_empty;
 507		u8 *ksv = &ksv_fifo[i * DRM_HDCP_KSV_LEN];
 508
 509		/* Fill up the empty slots in sha_text and write it out */
 510		sha_empty = sizeof(sha_text) - sha_leftovers;
 511		for (j = 0; j < sha_empty; j++) {
 512			u8 off = ((sizeof(sha_text) - j - 1 - sha_leftovers) * 8);
 513			sha_text |= ksv[j] << off;
 514		}
 515
 516		ret = intel_write_sha_text(display, sha_text);
 517		if (ret < 0)
 518			return ret;
 519
 520		/* Programming guide writes this every 64 bytes */
 521		sha_idx += sizeof(sha_text);
 522		if (!(sha_idx % 64))
 523			intel_de_write(display, HDCP_REP_CTL,
 524				       rep_ctl | HDCP_SHA1_TEXT_32);
 525
 526		/* Store the leftover bytes from the ksv in sha_text */
 527		sha_leftovers = DRM_HDCP_KSV_LEN - sha_empty;
 528		sha_text = 0;
 529		for (j = 0; j < sha_leftovers; j++)
 530			sha_text |= ksv[sha_empty + j] <<
 531					((sizeof(sha_text) - j - 1) * 8);
 532
 533		/*
 534		 * If we still have room in sha_text for more data, continue.
 535		 * Otherwise, write it out immediately.
 536		 */
 537		if (sizeof(sha_text) > sha_leftovers)
 538			continue;
 539
 540		ret = intel_write_sha_text(display, sha_text);
 541		if (ret < 0)
 542			return ret;
 543		sha_leftovers = 0;
 544		sha_text = 0;
 545		sha_idx += sizeof(sha_text);
 546	}
 547
 548	/*
 549	 * We need to write BINFO/BSTATUS, and M0 now. Depending on how many
 550	 * bytes are leftover from the last ksv, we might be able to fit them
 551	 * all in sha_text (first 2 cases), or we might need to split them up
 552	 * into 2 writes (last 2 cases).
 553	 */
 554	if (sha_leftovers == 0) {
 555		/* Write 16 bits of text, 16 bits of M0 */
 556		intel_de_write(display, HDCP_REP_CTL,
 557			       rep_ctl | HDCP_SHA1_TEXT_16);
 558		ret = intel_write_sha_text(display,
 559					   bstatus[0] << 8 | bstatus[1]);
 560		if (ret < 0)
 561			return ret;
 562		sha_idx += sizeof(sha_text);
 563
 564		/* Write 32 bits of M0 */
 565		intel_de_write(display, HDCP_REP_CTL,
 566			       rep_ctl | HDCP_SHA1_TEXT_0);
 567		ret = intel_write_sha_text(display, 0);
 568		if (ret < 0)
 569			return ret;
 570		sha_idx += sizeof(sha_text);
 571
 572		/* Write 16 bits of M0 */
 573		intel_de_write(display, HDCP_REP_CTL,
 574			       rep_ctl | HDCP_SHA1_TEXT_16);
 575		ret = intel_write_sha_text(display, 0);
 576		if (ret < 0)
 577			return ret;
 578		sha_idx += sizeof(sha_text);
 579
 580	} else if (sha_leftovers == 1) {
 581		/* Write 24 bits of text, 8 bits of M0 */
 582		intel_de_write(display, HDCP_REP_CTL,
 583			       rep_ctl | HDCP_SHA1_TEXT_24);
 584		sha_text |= bstatus[0] << 16 | bstatus[1] << 8;
 585		/* Only 24-bits of data, must be in the LSB */
 586		sha_text = (sha_text & 0xffffff00) >> 8;
 587		ret = intel_write_sha_text(display, sha_text);
 588		if (ret < 0)
 589			return ret;
 590		sha_idx += sizeof(sha_text);
 591
 592		/* Write 32 bits of M0 */
 593		intel_de_write(display, HDCP_REP_CTL,
 594			       rep_ctl | HDCP_SHA1_TEXT_0);
 595		ret = intel_write_sha_text(display, 0);
 596		if (ret < 0)
 597			return ret;
 598		sha_idx += sizeof(sha_text);
 599
 600		/* Write 24 bits of M0 */
 601		intel_de_write(display, HDCP_REP_CTL,
 602			       rep_ctl | HDCP_SHA1_TEXT_8);
 603		ret = intel_write_sha_text(display, 0);
 604		if (ret < 0)
 605			return ret;
 606		sha_idx += sizeof(sha_text);
 607
 608	} else if (sha_leftovers == 2) {
 609		/* Write 32 bits of text */
 610		intel_de_write(display, HDCP_REP_CTL,
 611			       rep_ctl | HDCP_SHA1_TEXT_32);
 612		sha_text |= bstatus[0] << 8 | bstatus[1];
 613		ret = intel_write_sha_text(display, sha_text);
 614		if (ret < 0)
 615			return ret;
 616		sha_idx += sizeof(sha_text);
 617
 618		/* Write 64 bits of M0 */
 619		intel_de_write(display, HDCP_REP_CTL,
 620			       rep_ctl | HDCP_SHA1_TEXT_0);
 621		for (i = 0; i < 2; i++) {
 622			ret = intel_write_sha_text(display, 0);
 623			if (ret < 0)
 624				return ret;
 625			sha_idx += sizeof(sha_text);
 626		}
 627
 628		/*
 629		 * Terminate the SHA-1 stream by hand. For the other leftover
 630		 * cases this is appended by the hardware.
 631		 */
 632		intel_de_write(display, HDCP_REP_CTL,
 633			       rep_ctl | HDCP_SHA1_TEXT_32);
 634		sha_text = DRM_HDCP_SHA1_TERMINATOR << 24;
 635		ret = intel_write_sha_text(display, sha_text);
 636		if (ret < 0)
 637			return ret;
 638		sha_idx += sizeof(sha_text);
 639	} else if (sha_leftovers == 3) {
 640		/* Write 32 bits of text (filled from LSB) */
 641		intel_de_write(display, HDCP_REP_CTL,
 642			       rep_ctl | HDCP_SHA1_TEXT_32);
 643		sha_text |= bstatus[0];
 644		ret = intel_write_sha_text(display, sha_text);
 645		if (ret < 0)
 646			return ret;
 647		sha_idx += sizeof(sha_text);
 648
 649		/* Write 8 bits of text (filled from LSB), 24 bits of M0 */
 650		intel_de_write(display, HDCP_REP_CTL,
 651			       rep_ctl | HDCP_SHA1_TEXT_8);
 652		ret = intel_write_sha_text(display, bstatus[1]);
 653		if (ret < 0)
 654			return ret;
 655		sha_idx += sizeof(sha_text);
 656
 657		/* Write 32 bits of M0 */
 658		intel_de_write(display, HDCP_REP_CTL,
 659			       rep_ctl | HDCP_SHA1_TEXT_0);
 660		ret = intel_write_sha_text(display, 0);
 661		if (ret < 0)
 662			return ret;
 663		sha_idx += sizeof(sha_text);
 664
 665		/* Write 8 bits of M0 */
 666		intel_de_write(display, HDCP_REP_CTL,
 667			       rep_ctl | HDCP_SHA1_TEXT_24);
 668		ret = intel_write_sha_text(display, 0);
 669		if (ret < 0)
 670			return ret;
 671		sha_idx += sizeof(sha_text);
 672	} else {
 673		drm_dbg_kms(display->drm, "Invalid number of leftovers %d\n",
 674			    sha_leftovers);
 675		return -EINVAL;
 676	}
 677
 678	intel_de_write(display, HDCP_REP_CTL, rep_ctl | HDCP_SHA1_TEXT_32);
 679	/* Fill up to 64-4 bytes with zeros (leave the last write for length) */
 680	while ((sha_idx % 64) < (64 - sizeof(sha_text))) {
 681		ret = intel_write_sha_text(display, 0);
 682		if (ret < 0)
 683			return ret;
 684		sha_idx += sizeof(sha_text);
 685	}
 686
 687	/*
 688	 * Last write gets the length of the concatenation in bits. That is:
 689	 *  - 5 bytes per device
 690	 *  - 10 bytes for BINFO/BSTATUS(2), M0(8)
 691	 */
 692	sha_text = (num_downstream * 5 + 10) * 8;
 693	ret = intel_write_sha_text(display, sha_text);
 694	if (ret < 0)
 695		return ret;
 696
 697	/* Tell the HW we're done with the hash and wait for it to ACK */
 698	intel_de_write(display, HDCP_REP_CTL,
 699		       rep_ctl | HDCP_SHA1_COMPLETE_HASH);
 700	if (intel_de_wait_for_set(display, HDCP_REP_CTL,
 701				  HDCP_SHA1_COMPLETE, 1)) {
 702		drm_err(display->drm, "Timed out waiting for SHA1 complete\n");
 703		return -ETIMEDOUT;
 704	}
 705	if (!(intel_de_read(display, HDCP_REP_CTL) & HDCP_SHA1_V_MATCH)) {
 706		drm_dbg_kms(display->drm, "SHA-1 mismatch, HDCP failed\n");
 707		return -ENXIO;
 708	}
 709
 710	return 0;
 711}
 712
 713/* Implements Part 2 of the HDCP authorization procedure */
 714static
 715int intel_hdcp_auth_downstream(struct intel_connector *connector)
 716{
 717	struct intel_display *display = to_intel_display(connector);
 718	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
 719	const struct intel_hdcp_shim *shim = connector->hdcp.shim;
 720	u8 bstatus[2], num_downstream, *ksv_fifo;
 721	int ret, i, tries = 3;
 722
 723	ret = intel_hdcp_poll_ksv_fifo(dig_port, shim);
 724	if (ret) {
 725		drm_dbg_kms(display->drm,
 726			    "KSV list failed to become ready (%d)\n", ret);
 727		return ret;
 728	}
 729
 730	ret = shim->read_bstatus(dig_port, bstatus);
 731	if (ret)
 732		return ret;
 733
 734	if (DRM_HDCP_MAX_DEVICE_EXCEEDED(bstatus[0]) ||
 735	    DRM_HDCP_MAX_CASCADE_EXCEEDED(bstatus[1])) {
 736		drm_dbg_kms(display->drm, "Max Topology Limit Exceeded\n");
 737		return -EPERM;
 738	}
 739
 740	/*
 741	 * When a repeater reports a device count of 0, the HDCP1.4 spec allows
 742	 * disabling HDCP encryption. That implies the repeater can't have its
 743	 * own display. As there is no consumption of encrypted content in a
 744	 * repeater with 0 downstream devices, we fail the
 745	 * authentication.
 746	 */
 747	num_downstream = DRM_HDCP_NUM_DOWNSTREAM(bstatus[0]);
 748	if (num_downstream == 0) {
 749		drm_dbg_kms(display->drm,
 750			    "Repeater with zero downstream devices\n");
 751		return -EINVAL;
 752	}
 753
 754	ksv_fifo = kcalloc(DRM_HDCP_KSV_LEN, num_downstream, GFP_KERNEL);
 755	if (!ksv_fifo) {
 756		drm_dbg_kms(display->drm, "Out of mem: ksv_fifo\n");
 757		return -ENOMEM;
 758	}
 759
 760	ret = shim->read_ksv_fifo(dig_port, num_downstream, ksv_fifo);
 761	if (ret)
 762		goto err;
 763
 764	if (drm_hdcp_check_ksvs_revoked(display->drm, ksv_fifo,
 765					num_downstream) > 0) {
 766		drm_err(display->drm, "Revoked Ksv(s) in ksv_fifo\n");
 767		ret = -EPERM;
 768		goto err;
 769	}
 770
 771	/*
 772	 * When V prime mismatches, the DP spec mandates re-reading
 773	 * V prime at least twice.
 774	 */
 775	for (i = 0; i < tries; i++) {
 776		ret = intel_hdcp_validate_v_prime(connector, shim,
 777						  ksv_fifo, num_downstream,
 778						  bstatus);
 779		if (!ret)
 780			break;
 781	}
 782
 783	if (i == tries) {
 784		drm_dbg_kms(display->drm,
 785			    "V Prime validation failed.(%d)\n", ret);
 786		goto err;
 787	}
 788
 789	drm_dbg_kms(display->drm, "HDCP is enabled (%d downstream devices)\n",
 790		    num_downstream);
 791	ret = 0;
 792err:
 793	kfree(ksv_fifo);
 794	return ret;
 795}
 796
 797/* Implements Part 1 of the HDCP authorization procedure */
 798static int intel_hdcp_auth(struct intel_connector *connector)
 799{
 800	struct intel_display *display = to_intel_display(connector);
 801	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
 802	struct intel_hdcp *hdcp = &connector->hdcp;
 803	const struct intel_hdcp_shim *shim = hdcp->shim;
 804	enum transcoder cpu_transcoder = connector->hdcp.cpu_transcoder;
 805	enum port port = dig_port->base.port;
 806	unsigned long r0_prime_gen_start;
 807	int ret, i, tries = 2;
 808	union {
 809		u32 reg[2];
 810		u8 shim[DRM_HDCP_AN_LEN];
 811	} an;
 812	union {
 813		u32 reg[2];
 814		u8 shim[DRM_HDCP_KSV_LEN];
 815	} bksv;
 816	union {
 817		u32 reg;
 818		u8 shim[DRM_HDCP_RI_LEN];
 819	} ri;
 820	bool repeater_present, hdcp_capable;
 821
 822	/*
 823	 * Detects whether the display is HDCP capable. Although we check for
 824	 * valid Bksv below, the HDCP over DP spec requires that we check
 825	 * whether the display supports HDCP before we write An. For HDMI
 826	 * displays, this is not necessary.
 827	 */
 828	if (shim->hdcp_get_capability) {
 829		ret = shim->hdcp_get_capability(dig_port, &hdcp_capable);
 830		if (ret)
 831			return ret;
 832		if (!hdcp_capable) {
 833			drm_dbg_kms(display->drm,
 834				    "Panel is not HDCP capable\n");
 835			return -EINVAL;
 836		}
 837	}
 838
 839	/* Initialize An with 2 random values and acquire it */
 840	for (i = 0; i < 2; i++)
 841		intel_de_write(display,
 842			       HDCP_ANINIT(display, cpu_transcoder, port),
 843			       get_random_u32());
 844	intel_de_write(display, HDCP_CONF(display, cpu_transcoder, port),
 845		       HDCP_CONF_CAPTURE_AN);
 846
 847	/* Wait for An to be acquired */
 848	if (intel_de_wait_for_set(display,
 849				  HDCP_STATUS(display, cpu_transcoder, port),
 850				  HDCP_STATUS_AN_READY, 1)) {
 851		drm_err(display->drm, "Timed out waiting for An\n");
 852		return -ETIMEDOUT;
 853	}
 854
 855	an.reg[0] = intel_de_read(display,
 856				  HDCP_ANLO(display, cpu_transcoder, port));
 857	an.reg[1] = intel_de_read(display,
 858				  HDCP_ANHI(display, cpu_transcoder, port));
 859	ret = shim->write_an_aksv(dig_port, an.shim);
 860	if (ret)
 861		return ret;
 862
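    	/* Timestamp the Aksv write; the R0' wait below is measured from here. */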
 863	r0_prime_gen_start = jiffies;
 864
 865	memset(&bksv, 0, sizeof(bksv));
 866
 867	ret = intel_hdcp_read_valid_bksv(dig_port, shim, bksv.shim);
 868	if (ret < 0)
 869		return ret;
 870
 871	if (drm_hdcp_check_ksvs_revoked(display->drm, bksv.shim, 1) > 0) {
 872		drm_err(display->drm, "BKSV is revoked\n");
 873		return -EPERM;
 874	}
 875
 876	intel_de_write(display, HDCP_BKSVLO(display, cpu_transcoder, port),
 877		       bksv.reg[0]);
 878	intel_de_write(display, HDCP_BKSVHI(display, cpu_transcoder, port),
 879		       bksv.reg[1]);
 880
 881	ret = shim->repeater_present(dig_port, &repeater_present);
 882	if (ret)
 883		return ret;
 884	if (repeater_present)
 885		intel_de_write(display, HDCP_REP_CTL,
 886			       intel_hdcp_get_repeater_ctl(display, cpu_transcoder, port));
 887
 888	ret = shim->toggle_signalling(dig_port, cpu_transcoder, true);
 889	if (ret)
 890		return ret;
 891
 892	intel_de_write(display, HDCP_CONF(display, cpu_transcoder, port),
 893		       HDCP_CONF_AUTH_AND_ENC);
 894
 895	/* Wait for R0 ready */
 896	if (wait_for(intel_de_read(display, HDCP_STATUS(display, cpu_transcoder, port)) &
 897		     (HDCP_STATUS_R0_READY | HDCP_STATUS_ENC), 1)) {
 898		drm_err(display->drm, "Timed out waiting for R0 ready\n");
 899		return -ETIMEDOUT;
 900	}
 901
 902	/*
 903	 * Wait for R0' to become available. The spec says 100ms from Aksv, but
 904	 * some monitors can take longer than this. We'll set the timeout at
 905	 * 300ms just to be sure.
 906	 *
 907	 * On DP, there's an R0_READY bit available but no such bit
 908	 * exists on HDMI. Since the upper-bound is the same, we'll just do
 909	 * the stupid thing instead of polling on one and not the other.
 910	 */
 911	wait_remaining_ms_from_jiffies(r0_prime_gen_start, 300);
 912
 913	tries = 3;
 914
 915	/*
 916	 * The DP HDCP spec mandates two more attempts to read R0 in case
 917	 * of an R0 mismatch.
 918	 */
 919	for (i = 0; i < tries; i++) {
 920		ri.reg = 0;
 921		ret = shim->read_ri_prime(dig_port, ri.shim);
 922		if (ret)
 923			return ret;
 924		intel_de_write(display,
 925			       HDCP_RPRIME(display, cpu_transcoder, port),
 926			       ri.reg);
 927
 928		/* Wait for Ri prime match */
 929		if (!wait_for(intel_de_read(display, HDCP_STATUS(display, cpu_transcoder, port)) &
 930			      (HDCP_STATUS_RI_MATCH | HDCP_STATUS_ENC), 1))
 931			break;
 932	}
 933
 934	if (i == tries) {
 935		drm_dbg_kms(display->drm,
 936			    "Timed out waiting for Ri prime match (%x)\n",
 937			    intel_de_read(display,
 938					  HDCP_STATUS(display, cpu_transcoder, port)));
 939		return -ETIMEDOUT;
 940	}
 941
 942	/* Wait for encryption confirmation */
 943	if (intel_de_wait_for_set(display,
 944				  HDCP_STATUS(display, cpu_transcoder, port),
 945				  HDCP_STATUS_ENC,
 946				  HDCP_ENCRYPT_STATUS_CHANGE_TIMEOUT_MS)) {
 947		drm_err(display->drm, "Timed out waiting for encryption\n");
 948		return -ETIMEDOUT;
 949	}
 950
 951	/* DP MST Auth Part 1 Step 2.a and Step 2.b */
 952	if (shim->stream_encryption) {
 953		ret = shim->stream_encryption(connector, true);
 954		if (ret) {
 955			drm_err(display->drm, "[CONNECTOR:%d:%s] Failed to enable HDCP 1.4 stream enc\n",
 956				connector->base.base.id, connector->base.name);
 957			return ret;
 958		}
 959		drm_dbg_kms(display->drm, "HDCP 1.4 transcoder: %s stream encrypted\n",
 960			    transcoder_name(hdcp->stream_transcoder));
 961	}
 962
 963	if (repeater_present)
 964		return intel_hdcp_auth_downstream(connector);
 965
 966	drm_dbg_kms(display->drm, "HDCP is enabled (no repeater present)\n");
 967	return 0;
 968}
 969
 970static int _intel_hdcp_disable(struct intel_connector *connector)
 971{
 972	struct intel_display *display = to_intel_display(connector);
 973	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
 974	struct intel_hdcp *hdcp = &connector->hdcp;
 975	enum port port = dig_port->base.port;
 976	enum transcoder cpu_transcoder = hdcp->cpu_transcoder;
 977	u32 repeater_ctl;
 978	int ret;
 979
 980	drm_dbg_kms(display->drm, "[CONNECTOR:%d:%s] HDCP is being disabled...\n",
 981		    connector->base.base.id, connector->base.name);
 982
 983	if (hdcp->shim->stream_encryption) {
 984		ret = hdcp->shim->stream_encryption(connector, false);
 985		if (ret) {
 986			drm_err(display->drm, "[CONNECTOR:%d:%s] Failed to disable HDCP 1.4 stream enc\n",
 987				connector->base.base.id, connector->base.name);
 988			return ret;
 989		}
 990		drm_dbg_kms(display->drm, "HDCP 1.4 transcoder: %s stream encryption disabled\n",
 991			    transcoder_name(hdcp->stream_transcoder));
 992		/*
 993		 * If there are other connectors on this port using HDCP,
 994		 * don't disable it until HDCP encryption has been disabled for
 995		 * all connectors in the MST topology.
 996		 */
 997		if (dig_port->num_hdcp_streams > 0)
 998			return 0;
 999	}
1000
1001	hdcp->hdcp_encrypted = false;
1002	intel_de_write(display, HDCP_CONF(display, cpu_transcoder, port), 0);
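    	/*
    	 * Waiting for the whole status register to clear confirms the HW has
    	 * fully torn down the HDCP state for this transcoder.
    	 */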
1003	if (intel_de_wait_for_clear(display,
1004				    HDCP_STATUS(display, cpu_transcoder, port),
1005				    ~0, HDCP_ENCRYPT_STATUS_CHANGE_TIMEOUT_MS)) {
1006		drm_err(display->drm,
1007			"Failed to disable HDCP, timeout clearing status\n");
1008		return -ETIMEDOUT;
1009	}
1010
1011	repeater_ctl = intel_hdcp_get_repeater_ctl(display, cpu_transcoder,
1012						   port);
1013	intel_de_rmw(display, HDCP_REP_CTL, repeater_ctl, 0);
1014
1015	ret = hdcp->shim->toggle_signalling(dig_port, cpu_transcoder, false);
1016	if (ret) {
1017		drm_err(display->drm, "Failed to disable HDCP signalling\n");
1018		return ret;
1019	}
1020
1021	drm_dbg_kms(display->drm, "HDCP is disabled\n");
1022	return 0;
1023}
1024
1025static int intel_hdcp1_enable(struct intel_connector *connector)
1026{
1027	struct intel_display *display = to_intel_display(connector);
1028	struct intel_hdcp *hdcp = &connector->hdcp;
1029	int i, ret, tries = 3;
1030
1031	drm_dbg_kms(display->drm, "[CONNECTOR:%d:%s] HDCP is being enabled...\n",
1032		    connector->base.base.id, connector->base.name);
1033
1034	if (!hdcp_key_loadable(display)) {
1035		drm_err(display->drm, "HDCP key Load is not possible\n");
1036		return -ENXIO;
1037	}
1038
1039	for (i = 0; i < KEY_LOAD_TRIES; i++) {
1040		ret = intel_hdcp_load_keys(display);
1041		if (!ret)
1042			break;
1043		intel_hdcp_clear_keys(display);
1044	}
1045	if (ret) {
1046		drm_err(display->drm, "Could not load HDCP keys, (%d)\n",
1047			ret);
1048		return ret;
1049	}
1050
1051	/* In case of authentication failures, the HDCP spec expects reauth. */
1052	for (i = 0; i < tries; i++) {
1053		ret = intel_hdcp_auth(connector);
1054		if (!ret) {
1055			hdcp->hdcp_encrypted = true;
1056			return 0;
1057		}
1058
1059		drm_dbg_kms(display->drm, "HDCP Auth failure (%d)\n", ret);
1060
1061		/* Ensuring HDCP encryption and signalling are stopped. */
1062		_intel_hdcp_disable(connector);
1063	}
1064
1065	drm_dbg_kms(display->drm,
1066		    "HDCP authentication failed (%d tries/%d)\n", tries, ret);
1067	return ret;
1068}
1069
1070static struct intel_connector *intel_hdcp_to_connector(struct intel_hdcp *hdcp)
1071{
1072	return container_of(hdcp, struct intel_connector, hdcp);
1073}
1074
1075static void intel_hdcp_update_value(struct intel_connector *connector,
1076				    u64 value, bool update_property)
1077{
1078	struct intel_display *display = to_intel_display(connector);
1079	struct drm_i915_private *i915 = to_i915(display->drm);
1080	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
1081	struct intel_hdcp *hdcp = &connector->hdcp;
1082
1083	drm_WARN_ON(display->drm, !mutex_is_locked(&hdcp->mutex));
1084
1085	if (hdcp->value == value)
1086		return;
1087
1088	drm_WARN_ON(display->drm, !mutex_is_locked(&dig_port->hdcp_mutex));
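    	/*
    	 * Keep the per-port count of HDCP-enabled streams in sync with
    	 * ENABLED <-> DESIRED transitions; the MST disable path relies on it.
    	 */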
1089
1090	if (hdcp->value == DRM_MODE_CONTENT_PROTECTION_ENABLED) {
1091		if (!drm_WARN_ON(display->drm, dig_port->num_hdcp_streams == 0))
1092			dig_port->num_hdcp_streams--;
1093	} else if (value == DRM_MODE_CONTENT_PROTECTION_ENABLED) {
1094		dig_port->num_hdcp_streams++;
1095	}
1096
1097	hdcp->value = value;
1098	if (update_property) {
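    		/*
    		 * Hold a connector reference for the property worker; it is
    		 * dropped in intel_hdcp_prop_work(), or right here if the work
    		 * was already queued.
    		 */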
1099		drm_connector_get(&connector->base);
1100		if (!queue_work(i915->unordered_wq, &hdcp->prop_work))
1101			drm_connector_put(&connector->base);
1102	}
1103}
1104
1105/* Implements Part 3 of the HDCP authorization procedure */
1106static int intel_hdcp_check_link(struct intel_connector *connector)
1107{
1108	struct intel_display *display = to_intel_display(connector);
1109	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
1110	struct intel_hdcp *hdcp = &connector->hdcp;
1111	enum port port = dig_port->base.port;
1112	enum transcoder cpu_transcoder;
1113	int ret = 0;
1114
1115	mutex_lock(&hdcp->mutex);
1116	mutex_lock(&dig_port->hdcp_mutex);
1117
1118	cpu_transcoder = hdcp->cpu_transcoder;
1119
1120	/* Check_link valid only when HDCP1.4 is enabled */
1121	if (hdcp->value != DRM_MODE_CONTENT_PROTECTION_ENABLED ||
1122	    !hdcp->hdcp_encrypted) {
1123		ret = -EINVAL;
1124		goto out;
1125	}
1126
1127	if (drm_WARN_ON(display->drm,
1128			!intel_hdcp_in_use(display, cpu_transcoder, port))) {
1129		drm_err(display->drm,
1130			"[CONNECTOR:%d:%s] HDCP link stopped encryption,%x\n",
1131			connector->base.base.id, connector->base.name,
1132			intel_de_read(display, HDCP_STATUS(display, cpu_transcoder, port)));
1133		ret = -ENXIO;
1134		intel_hdcp_update_value(connector,
1135					DRM_MODE_CONTENT_PROTECTION_DESIRED,
1136					true);
1137		goto out;
1138	}
1139
1140	if (hdcp->shim->check_link(dig_port, connector)) {
1141		if (hdcp->value != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) {
1142			intel_hdcp_update_value(connector,
1143				DRM_MODE_CONTENT_PROTECTION_ENABLED, true);
1144		}
1145		goto out;
1146	}
1147
1148	drm_dbg_kms(display->drm,
1149		    "[CONNECTOR:%d:%s] HDCP link failed, retrying authentication\n",
1150		    connector->base.base.id, connector->base.name);
1151
1152	ret = _intel_hdcp_disable(connector);
1153	if (ret) {
1154		drm_err(display->drm, "Failed to disable hdcp (%d)\n", ret);
1155		intel_hdcp_update_value(connector,
1156					DRM_MODE_CONTENT_PROTECTION_DESIRED,
1157					true);
1158		goto out;
1159	}
1160
1161	ret = intel_hdcp1_enable(connector);
1162	if (ret) {
1163		drm_err(display->drm, "Failed to enable hdcp (%d)\n", ret);
1164		intel_hdcp_update_value(connector,
1165					DRM_MODE_CONTENT_PROTECTION_DESIRED,
1166					true);
1167		goto out;
1168	}
1169
1170out:
1171	mutex_unlock(&dig_port->hdcp_mutex);
1172	mutex_unlock(&hdcp->mutex);
1173	return ret;
1174}
1175
1176static void intel_hdcp_prop_work(struct work_struct *work)
1177{
1178	struct intel_hdcp *hdcp = container_of(work, struct intel_hdcp,
1179					       prop_work);
1180	struct intel_connector *connector = intel_hdcp_to_connector(hdcp);
1181	struct intel_display *display = to_intel_display(connector);
1182
1183	drm_modeset_lock(&display->drm->mode_config.connection_mutex, NULL);
1184	mutex_lock(&hdcp->mutex);
1185
1186	/*
1187	 * This worker is only used to flip between ENABLED/DESIRED. Either of
1188	 * those to UNDESIRED is handled by core. If value == UNDESIRED,
1189	 * we're running just after hdcp has been disabled, so just exit
1190	 */
1191	if (hdcp->value != DRM_MODE_CONTENT_PROTECTION_UNDESIRED)
1192		drm_hdcp_update_content_protection(&connector->base,
1193						   hdcp->value);
1194
1195	mutex_unlock(&hdcp->mutex);
1196	drm_modeset_unlock(&display->drm->mode_config.connection_mutex);
1197
1198	drm_connector_put(&connector->base);
1199}
1200
1201bool is_hdcp_supported(struct intel_display *display, enum port port)
1202{
1203	return DISPLAY_RUNTIME_INFO(display)->has_hdcp &&
1204		(DISPLAY_VER(display) >= 12 || port < PORT_E);
1205}
1206
1207static int
1208hdcp2_prepare_ake_init(struct intel_connector *connector,
1209		       struct hdcp2_ake_init *ake_data)
1210{
1211	struct intel_display *display = to_intel_display(connector);
1212	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
1213	struct hdcp_port_data *data = &dig_port->hdcp_port_data;
1214	struct i915_hdcp_arbiter *arbiter;
1215	int ret;
1216
1217	mutex_lock(&display->hdcp.hdcp_mutex);
1218	arbiter = display->hdcp.arbiter;
1219
1220	if (!arbiter || !arbiter->ops) {
1221		mutex_unlock(&display->hdcp.hdcp_mutex);
1222		return -EINVAL;
1223	}
1224
1225	ret = arbiter->ops->initiate_hdcp2_session(arbiter->hdcp_dev, data, ake_data);
1226	if (ret)
1227		drm_dbg_kms(display->drm, "Prepare_ake_init failed. %d\n",
1228			    ret);
1229	mutex_unlock(&display->hdcp.hdcp_mutex);
1230
1231	return ret;
1232}
1233
1234static int
1235hdcp2_verify_rx_cert_prepare_km(struct intel_connector *connector,
1236				struct hdcp2_ake_send_cert *rx_cert,
1237				bool *paired,
1238				struct hdcp2_ake_no_stored_km *ek_pub_km,
1239				size_t *msg_sz)
1240{
1241	struct intel_display *display = to_intel_display(connector);
1242	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
1243	struct hdcp_port_data *data = &dig_port->hdcp_port_data;
1244	struct i915_hdcp_arbiter *arbiter;
1245	int ret;
1246
1247	mutex_lock(&display->hdcp.hdcp_mutex);
1248	arbiter = display->hdcp.arbiter;
1249
1250	if (!arbiter || !arbiter->ops) {
1251		mutex_unlock(&display->hdcp.hdcp_mutex);
1252		return -EINVAL;
1253	}
1254
1255	ret = arbiter->ops->verify_receiver_cert_prepare_km(arbiter->hdcp_dev, data,
1256							 rx_cert, paired,
1257							 ek_pub_km, msg_sz);
1258	if (ret < 0)
1259		drm_dbg_kms(display->drm, "Verify rx_cert failed. %d\n",
1260			    ret);
1261	mutex_unlock(&display->hdcp.hdcp_mutex);
1262
1263	return ret;
1264}
1265
1266static int hdcp2_verify_hprime(struct intel_connector *connector,
1267			       struct hdcp2_ake_send_hprime *rx_hprime)
1268{
1269	struct intel_display *display = to_intel_display(connector);
1270	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
1271	struct hdcp_port_data *data = &dig_port->hdcp_port_data;
1272	struct i915_hdcp_arbiter *arbiter;
1273	int ret;
1274
1275	mutex_lock(&display->hdcp.hdcp_mutex);
1276	arbiter = display->hdcp.arbiter;
1277
1278	if (!arbiter || !arbiter->ops) {
1279		mutex_unlock(&display->hdcp.hdcp_mutex);
1280		return -EINVAL;
1281	}
1282
1283	ret = arbiter->ops->verify_hprime(arbiter->hdcp_dev, data, rx_hprime);
1284	if (ret < 0)
1285		drm_dbg_kms(display->drm, "Verify hprime failed. %d\n", ret);
1286	mutex_unlock(&display->hdcp.hdcp_mutex);
1287
1288	return ret;
1289}
1290
1291static int
1292hdcp2_store_pairing_info(struct intel_connector *connector,
1293			 struct hdcp2_ake_send_pairing_info *pairing_info)
1294{
1295	struct intel_display *display = to_intel_display(connector);
1296	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
1297	struct hdcp_port_data *data = &dig_port->hdcp_port_data;
1298	struct i915_hdcp_arbiter *arbiter;
1299	int ret;
1300
1301	mutex_lock(&display->hdcp.hdcp_mutex);
1302	arbiter = display->hdcp.arbiter;
1303
1304	if (!arbiter || !arbiter->ops) {
1305		mutex_unlock(&display->hdcp.hdcp_mutex);
1306		return -EINVAL;
1307	}
1308
1309	ret = arbiter->ops->store_pairing_info(arbiter->hdcp_dev, data, pairing_info);
1310	if (ret < 0)
1311		drm_dbg_kms(display->drm, "Store pairing info failed. %d\n",
1312			    ret);
1313	mutex_unlock(&display->hdcp.hdcp_mutex);
1314
1315	return ret;
1316}
1317
1318static int
1319hdcp2_prepare_lc_init(struct intel_connector *connector,
1320		      struct hdcp2_lc_init *lc_init)
1321{
1322	struct intel_display *display = to_intel_display(connector);
1323	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
1324	struct hdcp_port_data *data = &dig_port->hdcp_port_data;
1325	struct i915_hdcp_arbiter *arbiter;
1326	int ret;
1327
1328	mutex_lock(&display->hdcp.hdcp_mutex);
1329	arbiter = display->hdcp.arbiter;
1330
1331	if (!arbiter || !arbiter->ops) {
1332		mutex_unlock(&display->hdcp.hdcp_mutex);
1333		return -EINVAL;
1334	}
1335
1336	ret = arbiter->ops->initiate_locality_check(arbiter->hdcp_dev, data, lc_init);
1337	if (ret < 0)
1338		drm_dbg_kms(display->drm, "Prepare lc_init failed. %d\n",
1339			    ret);
1340	mutex_unlock(&display->hdcp.hdcp_mutex);
1341
1342	return ret;
1343}
1344
1345static int
1346hdcp2_verify_lprime(struct intel_connector *connector,
1347		    struct hdcp2_lc_send_lprime *rx_lprime)
1348{
1349	struct intel_display *display = to_intel_display(connector);
1350	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
1351	struct hdcp_port_data *data = &dig_port->hdcp_port_data;
1352	struct i915_hdcp_arbiter *arbiter;
1353	int ret;
1354
1355	mutex_lock(&display->hdcp.hdcp_mutex);
1356	arbiter = display->hdcp.arbiter;
1357
1358	if (!arbiter || !arbiter->ops) {
1359		mutex_unlock(&display->hdcp.hdcp_mutex);
1360		return -EINVAL;
1361	}
1362
1363	ret = arbiter->ops->verify_lprime(arbiter->hdcp_dev, data, rx_lprime);
1364	if (ret < 0)
1365		drm_dbg_kms(display->drm, "Verify L_Prime failed. %d\n",
1366			    ret);
1367	mutex_unlock(&display->hdcp.hdcp_mutex);
1368
1369	return ret;
1370}
1371
1372static int hdcp2_prepare_skey(struct intel_connector *connector,
1373			      struct hdcp2_ske_send_eks *ske_data)
1374{
1375	struct intel_display *display = to_intel_display(connector);
1376	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
1377	struct hdcp_port_data *data = &dig_port->hdcp_port_data;
1378	struct i915_hdcp_arbiter *arbiter;
1379	int ret;
1380
1381	mutex_lock(&display->hdcp.hdcp_mutex);
1382	arbiter = display->hdcp.arbiter;
1383
1384	if (!arbiter || !arbiter->ops) {
1385		mutex_unlock(&display->hdcp.hdcp_mutex);
1386		return -EINVAL;
1387	}
1388
1389	ret = arbiter->ops->get_session_key(arbiter->hdcp_dev, data, ske_data);
1390	if (ret < 0)
1391		drm_dbg_kms(display->drm, "Get session key failed. %d\n",
1392			    ret);
1393	mutex_unlock(&display->hdcp.hdcp_mutex);
1394
1395	return ret;
1396}
1397
1398static int
1399hdcp2_verify_rep_topology_prepare_ack(struct intel_connector *connector,
1400				      struct hdcp2_rep_send_receiverid_list
1401								*rep_topology,
1402				      struct hdcp2_rep_send_ack *rep_send_ack)
1403{
1404	struct intel_display *display = to_intel_display(connector);
1405	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
1406	struct hdcp_port_data *data = &dig_port->hdcp_port_data;
1407	struct i915_hdcp_arbiter *arbiter;
1408	int ret;
1409
1410	mutex_lock(&display->hdcp.hdcp_mutex);
1411	arbiter = display->hdcp.arbiter;
1412
1413	if (!arbiter || !arbiter->ops) {
1414		mutex_unlock(&display->hdcp.hdcp_mutex);
1415		return -EINVAL;
1416	}
1417
1418	ret = arbiter->ops->repeater_check_flow_prepare_ack(arbiter->hdcp_dev,
1419							    data,
1420							    rep_topology,
1421							    rep_send_ack);
1422	if (ret < 0)
1423		drm_dbg_kms(display->drm,
1424			    "Verify rep topology failed. %d\n", ret);
1425	mutex_unlock(&display->hdcp.hdcp_mutex);
1426
1427	return ret;
1428}
1429
1430static int
1431hdcp2_verify_mprime(struct intel_connector *connector,
1432		    struct hdcp2_rep_stream_ready *stream_ready)
1433{
1434	struct intel_display *display = to_intel_display(connector);
1435	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
1436	struct hdcp_port_data *data = &dig_port->hdcp_port_data;
1437	struct i915_hdcp_arbiter *arbiter;
1438	int ret;
1439
1440	mutex_lock(&display->hdcp.hdcp_mutex);
1441	arbiter = display->hdcp.arbiter;
1442
1443	if (!arbiter || !arbiter->ops) {
1444		mutex_unlock(&display->hdcp.hdcp_mutex);
1445		return -EINVAL;
1446	}
1447
1448	ret = arbiter->ops->verify_mprime(arbiter->hdcp_dev, data, stream_ready);
1449	if (ret < 0)
1450		drm_dbg_kms(display->drm, "Verify mprime failed. %d\n", ret);
1451	mutex_unlock(&display->hdcp.hdcp_mutex);
1452
1453	return ret;
1454}
1455
1456static int hdcp2_authenticate_port(struct intel_connector *connector)
1457{
1458	struct intel_display *display = to_intel_display(connector);
1459	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
1460	struct hdcp_port_data *data = &dig_port->hdcp_port_data;
1461	struct i915_hdcp_arbiter *arbiter;
1462	int ret;
1463
1464	mutex_lock(&display->hdcp.hdcp_mutex);
1465	arbiter = display->hdcp.arbiter;
1466
1467	if (!arbiter || !arbiter->ops) {
1468		mutex_unlock(&display->hdcp.hdcp_mutex);
1469		return -EINVAL;
1470	}
1471
1472	ret = arbiter->ops->enable_hdcp_authentication(arbiter->hdcp_dev, data);
1473	if (ret < 0)
1474		drm_dbg_kms(display->drm, "Enable hdcp auth failed. %d\n",
1475			    ret);
1476	mutex_unlock(&display->hdcp.hdcp_mutex);
1477
1478	return ret;
1479}
1480
1481static int hdcp2_close_session(struct intel_connector *connector)
1482{
1483	struct intel_display *display = to_intel_display(connector);
1484	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
1485	struct i915_hdcp_arbiter *arbiter;
1486	int ret;
1487
1488	mutex_lock(&display->hdcp.hdcp_mutex);
1489	arbiter = display->hdcp.arbiter;
1490
1491	if (!arbiter || !arbiter->ops) {
1492		mutex_unlock(&display->hdcp.hdcp_mutex);
1493		return -EINVAL;
1494	}
1495
1496	ret = arbiter->ops->close_hdcp_session(arbiter->hdcp_dev,
1497					     &dig_port->hdcp_port_data);
1498	mutex_unlock(&display->hdcp.hdcp_mutex);
1499
1500	return ret;
1501}
1502
1503static int hdcp2_deauthenticate_port(struct intel_connector *connector)
1504{
1505	return hdcp2_close_session(connector);
1506}
1507
1508/* Authentication flow starts from here */
1509static int hdcp2_authentication_key_exchange(struct intel_connector *connector)
1510{
1511	struct intel_display *display = to_intel_display(connector);
1512	struct intel_digital_port *dig_port =
1513		intel_attached_dig_port(connector);
1514	struct intel_hdcp *hdcp = &connector->hdcp;
1515	union {
1516		struct hdcp2_ake_init ake_init;
1517		struct hdcp2_ake_send_cert send_cert;
1518		struct hdcp2_ake_no_stored_km no_stored_km;
1519		struct hdcp2_ake_send_hprime send_hprime;
1520		struct hdcp2_ake_send_pairing_info pairing_info;
1521	} msgs;
1522	const struct intel_hdcp_shim *shim = hdcp->shim;
1523	size_t size;
1524	int ret, i, max_retries;
1525
1526	/* Init for seq_num */
1527	hdcp->seq_num_v = 0;
1528	hdcp->seq_num_m = 0;
1529
1530	if (intel_encoder_is_dp(&dig_port->base) ||
1531	    intel_encoder_is_mst(&dig_port->base))
1532		max_retries = 10;
1533	else
1534		max_retries = 1;
1535
1536	ret = hdcp2_prepare_ake_init(connector, &msgs.ake_init);
1537	if (ret < 0)
1538		return ret;
1539
1540	/*
1541	 * Retry the first read and write to the downstream device at least 10
1542	 * times with a 50ms delay if it is not hdcp2 capable, for DP/DPMST
1543	 * encoders (a dock may stop advertising hdcp2 capability for some reason).
1544	 * The reason is that during suspend/resume a dock usually keeps the
1545	 * HDCP2 registers inaccessible, causing AUX errors. This wouldn't be a
1546	 * big problem if userspace just kept retrying with some delay while it
1547	 * continues to play low value content, but most userspace applications
1548	 * end up throwing an error when they receive one from KMD. This makes
1549	 * sure we give the dock and the sink devices time to complete their power
1550	 * cycle and then try HDCP authentication. The value of 10 and the 50ms
1551	 * delay were decided based on multiple trials and errors.
1552	 */
1553	for (i = 0; i < max_retries; i++) {
1554		if (!intel_hdcp2_get_capability(connector)) {
1555			msleep(50);
1556			continue;
1557		}
1558
1559		ret = shim->write_2_2_msg(connector, &msgs.ake_init,
1560					  sizeof(msgs.ake_init));
1561		if (ret < 0)
1562			continue;
1563
1564		ret = shim->read_2_2_msg(connector, HDCP_2_2_AKE_SEND_CERT,
1565					 &msgs.send_cert, sizeof(msgs.send_cert));
1566		if (ret > 0)
1567			break;
1568	}
1569
1570	if (ret < 0)
1571		return ret;
1572
1573	if (msgs.send_cert.rx_caps[0] != HDCP_2_2_RX_CAPS_VERSION_VAL) {
1574		drm_dbg_kms(display->drm, "cert.rx_caps dont claim HDCP2.2\n");
1575		return -EINVAL;
1576	}
1577
1578	hdcp->is_repeater = HDCP_2_2_RX_REPEATER(msgs.send_cert.rx_caps[2]);
1579
1580	if (drm_hdcp_check_ksvs_revoked(display->drm,
1581					msgs.send_cert.cert_rx.receiver_id,
1582					1) > 0) {
1583		drm_err(display->drm, "Receiver ID is revoked\n");
1584		return -EPERM;
1585	}
1586
1587	/*
1588	 * Here msgs.no_stored_km will also hold the message corresponding
1589	 * to a stored km.
1590	 */
1591	ret = hdcp2_verify_rx_cert_prepare_km(connector, &msgs.send_cert,
1592					      &hdcp->is_paired,
1593					      &msgs.no_stored_km, &size);
1594	if (ret < 0)
1595		return ret;
1596
1597	ret = shim->write_2_2_msg(connector, &msgs.no_stored_km, size);
1598	if (ret < 0)
1599		return ret;
1600
1601	ret = shim->read_2_2_msg(connector, HDCP_2_2_AKE_SEND_HPRIME,
1602				 &msgs.send_hprime, sizeof(msgs.send_hprime));
1603	if (ret < 0)
1604		return ret;
1605
1606	ret = hdcp2_verify_hprime(connector, &msgs.send_hprime);
1607	if (ret < 0)
1608		return ret;
1609
1610	if (!hdcp->is_paired) {
1611		/* Pairing is required */
1612		ret = shim->read_2_2_msg(connector,
1613					 HDCP_2_2_AKE_SEND_PAIRING_INFO,
1614					 &msgs.pairing_info,
1615					 sizeof(msgs.pairing_info));
1616		if (ret < 0)
1617			return ret;
1618
1619		ret = hdcp2_store_pairing_info(connector, &msgs.pairing_info);
1620		if (ret < 0)
1621			return ret;
1622		hdcp->is_paired = true;
1623	}
1624
1625	return 0;
1626}
1627
1628static int hdcp2_locality_check(struct intel_connector *connector)
1629{
1630	struct intel_hdcp *hdcp = &connector->hdcp;
1631	union {
1632		struct hdcp2_lc_init lc_init;
1633		struct hdcp2_lc_send_lprime send_lprime;
1634	} msgs;
1635	const struct intel_hdcp_shim *shim = hdcp->shim;
1636	int tries = HDCP2_LC_RETRY_CNT, ret, i;
1637
1638	for (i = 0; i < tries; i++) {
1639		ret = hdcp2_prepare_lc_init(connector, &msgs.lc_init);
1640		if (ret < 0)
1641			continue;
1642
1643		ret = shim->write_2_2_msg(connector, &msgs.lc_init,
1644				      sizeof(msgs.lc_init));
1645		if (ret < 0)
1646			continue;
1647
1648		ret = shim->read_2_2_msg(connector,
1649					 HDCP_2_2_LC_SEND_LPRIME,
1650					 &msgs.send_lprime,
1651					 sizeof(msgs.send_lprime));
1652		if (ret < 0)
1653			continue;
1654
1655		ret = hdcp2_verify_lprime(connector, &msgs.send_lprime);
1656		if (!ret)
1657			break;
1658	}
1659
1660	return ret;
1661}
1662
1663static int hdcp2_session_key_exchange(struct intel_connector *connector)
1664{
1665	struct intel_hdcp *hdcp = &connector->hdcp;
1666	struct hdcp2_ske_send_eks send_eks;
1667	int ret;
1668
1669	ret = hdcp2_prepare_skey(connector, &send_eks);
1670	if (ret < 0)
1671		return ret;
1672
1673	ret = hdcp->shim->write_2_2_msg(connector, &send_eks,
1674					sizeof(send_eks));
1675	if (ret < 0)
1676		return ret;
1677
1678	return 0;
1679}
1680
1681static
1682int _hdcp2_propagate_stream_management_info(struct intel_connector *connector)
1683{
1684	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
1685	struct hdcp_port_data *data = &dig_port->hdcp_port_data;
1686	struct intel_hdcp *hdcp = &connector->hdcp;
1687	union {
1688		struct hdcp2_rep_stream_manage stream_manage;
1689		struct hdcp2_rep_stream_ready stream_ready;
1690	} msgs;
1691	const struct intel_hdcp_shim *shim = hdcp->shim;
1692	int ret, streams_size_delta, i;
1693
1694	if (connector->hdcp.seq_num_m > HDCP_2_2_SEQ_NUM_MAX)
1695		return -ERANGE;
1696
1697	/* Prepare RepeaterAuth_Stream_Manage msg */
1698	msgs.stream_manage.msg_id = HDCP_2_2_REP_STREAM_MANAGE;
1699	drm_hdcp_cpu_to_be24(msgs.stream_manage.seq_num_m, hdcp->seq_num_m);
1700
1701	msgs.stream_manage.k = cpu_to_be16(data->k);
1702
1703	for (i = 0; i < data->k; i++) {
1704		msgs.stream_manage.streams[i].stream_id = data->streams[i].stream_id;
1705		msgs.stream_manage.streams[i].stream_type = data->streams[i].stream_type;
1706	}
1707
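    	/*
    	 * Only data->k entries of the fixed-size streams[] array are valid;
    	 * trim the unused tail from the message before sending it.
    	 */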
1708	streams_size_delta = (HDCP_2_2_MAX_CONTENT_STREAMS_CNT - data->k) *
1709				sizeof(struct hdcp2_streamid_type);
1710	/* Send it to Repeater */
1711	ret = shim->write_2_2_msg(connector, &msgs.stream_manage,
1712				  sizeof(msgs.stream_manage) - streams_size_delta);
1713	if (ret < 0)
1714		goto out;
1715
1716	ret = shim->read_2_2_msg(connector, HDCP_2_2_REP_STREAM_READY,
1717				 &msgs.stream_ready, sizeof(msgs.stream_ready));
1718	if (ret < 0)
1719		goto out;
1720
1721	data->seq_num_m = hdcp->seq_num_m;
1722
1723	ret = hdcp2_verify_mprime(connector, &msgs.stream_ready);
1724
1725out:
1726	hdcp->seq_num_m++;
1727
1728	return ret;
1729}
1730
1731static
1732int hdcp2_authenticate_repeater_topology(struct intel_connector *connector)
1733{
1734	struct intel_display *display = to_intel_display(connector);
1735	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
1736	struct intel_hdcp *hdcp = &connector->hdcp;
1737	union {
1738		struct hdcp2_rep_send_receiverid_list recvid_list;
1739		struct hdcp2_rep_send_ack rep_ack;
1740	} msgs;
1741	const struct intel_hdcp_shim *shim = hdcp->shim;
1742	u32 seq_num_v, device_cnt;
1743	u8 *rx_info;
1744	int ret;
1745
1746	ret = shim->read_2_2_msg(connector, HDCP_2_2_REP_SEND_RECVID_LIST,
1747				 &msgs.recvid_list, sizeof(msgs.recvid_list));
1748	if (ret < 0)
1749		return ret;
1750
1751	rx_info = msgs.recvid_list.rx_info;
1752
1753	if (HDCP_2_2_MAX_CASCADE_EXCEEDED(rx_info[1]) ||
1754	    HDCP_2_2_MAX_DEVS_EXCEEDED(rx_info[1])) {
1755		drm_dbg_kms(display->drm, "Topology Max Size Exceeded\n");
1756		return -EINVAL;
1757	}
1758
1759	/*
1760	 * MST topology is not Type 1 capable if it contains a downstream
1761	 * device that is only HDCP 1.x or Legacy HDCP 2.0/2.1 compliant.
1762	 */
1763	dig_port->hdcp_mst_type1_capable =
1764		!HDCP_2_2_HDCP1_DEVICE_CONNECTED(rx_info[1]) &&
1765		!HDCP_2_2_HDCP_2_0_REP_CONNECTED(rx_info[1]);
1766
1767	if (!dig_port->hdcp_mst_type1_capable && hdcp->content_type) {
1768		drm_dbg_kms(display->drm,
1769			    "HDCP1.x or 2.0 Legacy Device Downstream\n");
1770		return -EINVAL;
1771	}
1772
1773	/* Convert seq_num_v to CPU byte order and store it in a local DWORD */
1774	seq_num_v =
1775		drm_hdcp_be24_to_cpu((const u8 *)msgs.recvid_list.seq_num_v);
1776
1777	if (!hdcp->hdcp2_encrypted && seq_num_v) {
1778		drm_dbg_kms(display->drm,
1779			    "Non-zero Seq_num_v at the first RecvId_List msg\n");
1780		return -EINVAL;
1781	}
1782
1783	if (seq_num_v < hdcp->seq_num_v) {
1784		/* Roll over of the seq_num_v from repeater. Reauthenticate. */
1785		drm_dbg_kms(display->drm, "Seq_num_v roll over.\n");
1786		return -EINVAL;
1787	}
1788
1789	device_cnt = (HDCP_2_2_DEV_COUNT_HI(rx_info[0]) << 4 |
1790		      HDCP_2_2_DEV_COUNT_LO(rx_info[1]));
1791	if (drm_hdcp_check_ksvs_revoked(display->drm,
1792					msgs.recvid_list.receiver_ids,
1793					device_cnt) > 0) {
1794		drm_err(display->drm, "Revoked receiver ID(s) present in the list\n");
1795		return -EPERM;
1796	}
1797
1798	ret = hdcp2_verify_rep_topology_prepare_ack(connector,
1799						    &msgs.recvid_list,
1800						    &msgs.rep_ack);
1801	if (ret < 0)
1802		return ret;
1803
1804	hdcp->seq_num_v = seq_num_v;
1805	ret = shim->write_2_2_msg(connector, &msgs.rep_ack,
1806				  sizeof(msgs.rep_ack));
1807	if (ret < 0)
1808		return ret;
1809
1810	return 0;
1811}
1812
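/*
 * HDCP 2.2 sink authentication: AKE, locality check and session key exchange,
 * followed by stream type configuration and, for repeaters, topology
 * authentication.
 */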
1813static int hdcp2_authenticate_sink(struct intel_connector *connector)
1814{
1815	struct intel_display *display = to_intel_display(connector);
1816	struct intel_hdcp *hdcp = &connector->hdcp;
1817	const struct intel_hdcp_shim *shim = hdcp->shim;
1818	int ret;
1819
1820	ret = hdcp2_authentication_key_exchange(connector);
1821	if (ret < 0) {
1822		drm_dbg_kms(display->drm, "AKE Failed. Err : %d\n", ret);
1823		return ret;
1824	}
1825
1826	ret = hdcp2_locality_check(connector);
1827	if (ret < 0) {
1828		drm_dbg_kms(display->drm,
1829			    "Locality Check failed. Err : %d\n", ret);
1830		return ret;
1831	}
1832
1833	ret = hdcp2_session_key_exchange(connector);
1834	if (ret < 0) {
1835		drm_dbg_kms(display->drm, "SKE Failed. Err : %d\n", ret);
1836		return ret;
1837	}
1838
1839	if (shim->config_stream_type) {
1840		ret = shim->config_stream_type(connector,
1841					       hdcp->is_repeater,
1842					       hdcp->content_type);
1843		if (ret < 0)
1844			return ret;
1845	}
1846
1847	if (hdcp->is_repeater) {
1848		ret = hdcp2_authenticate_repeater_topology(connector);
1849		if (ret < 0) {
1850			drm_dbg_kms(display->drm,
1851				    "Repeater Auth Failed. Err: %d\n", ret);
1852			return ret;
1853		}
1854	}
1855
1856	return ret;
1857}
1858
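/*
 * Turn on per-stream encryption once the port reports the link as encrypted.
 * If the link is not encrypted, deauthenticate the port so authentication can
 * be redone.
 */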
1859static int hdcp2_enable_stream_encryption(struct intel_connector *connector)
1860{
1861	struct intel_display *display = to_intel_display(connector);
1862	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
1863	struct hdcp_port_data *data = &dig_port->hdcp_port_data;
1864	struct intel_hdcp *hdcp = &connector->hdcp;
1865	enum transcoder cpu_transcoder = hdcp->cpu_transcoder;
1866	enum port port = dig_port->base.port;
1867	int ret = 0;
1868
1869	if (!(intel_de_read(display, HDCP2_STATUS(display, cpu_transcoder, port)) &
1870			    LINK_ENCRYPTION_STATUS)) {
1871		drm_err(display->drm, "[CONNECTOR:%d:%s] HDCP 2.2 Link is not encrypted\n",
1872			connector->base.base.id, connector->base.name);
1873		ret = -EPERM;
1874		goto link_recover;
1875	}
1876
1877	if (hdcp->shim->stream_2_2_encryption) {
1878		ret = hdcp->shim->stream_2_2_encryption(connector, true);
1879		if (ret) {
1880			drm_err(display->drm, "[CONNECTOR:%d:%s] Failed to enable HDCP 2.2 stream enc\n",
1881				connector->base.base.id, connector->base.name);
1882			return ret;
1883		}
1884		drm_dbg_kms(display->drm, "HDCP 2.2 transcoder: %s stream encrypted\n",
1885			    transcoder_name(hdcp->stream_transcoder));
1886	}
1887
1888	return 0;
1889
1890link_recover:
1891	if (hdcp2_deauthenticate_port(connector) < 0)
1892		drm_dbg_kms(display->drm, "Port deauth failed.\n");
1893
1894	dig_port->hdcp_auth_status = false;
1895	data->k = 0;
1896
1897	return ret;
1898}
1899
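/*
 * Enable HDCP signalling if the shim requires it, request link encryption
 * once the port reports LINK_AUTH_STATUS, and wait for LINK_ENCRYPTION_STATUS
 * to be set in HDCP2_STATUS.
 */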
1900static int hdcp2_enable_encryption(struct intel_connector *connector)
1901{
1902	struct intel_display *display = to_intel_display(connector);
1903	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
1904	struct intel_hdcp *hdcp = &connector->hdcp;
1905	enum port port = dig_port->base.port;
1906	enum transcoder cpu_transcoder = hdcp->cpu_transcoder;
1907	int ret;
1908
1909	drm_WARN_ON(display->drm,
1910		    intel_de_read(display, HDCP2_STATUS(display, cpu_transcoder, port)) &
1911		    LINK_ENCRYPTION_STATUS);
1912	if (hdcp->shim->toggle_signalling) {
1913		ret = hdcp->shim->toggle_signalling(dig_port, cpu_transcoder,
1914						    true);
1915		if (ret) {
1916			drm_err(display->drm,
1917				"Failed to enable HDCP signalling. %d\n",
1918				ret);
1919			return ret;
1920		}
1921	}
1922
1923	if (intel_de_read(display, HDCP2_STATUS(display, cpu_transcoder, port)) &
1924	    LINK_AUTH_STATUS)
1925		/* Link is Authenticated. Now set for Encryption */
1926		intel_de_rmw(display, HDCP2_CTL(display, cpu_transcoder, port),
1927			     0, CTL_LINK_ENCRYPTION_REQ);
1928
1929	ret = intel_de_wait_for_set(display,
1930				    HDCP2_STATUS(display, cpu_transcoder,
1931						 port),
1932				    LINK_ENCRYPTION_STATUS,
1933				    HDCP_ENCRYPT_STATUS_CHANGE_TIMEOUT_MS);
1934	dig_port->hdcp_auth_status = true;
1935
1936	return ret;
1937}
1938
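/*
 * Drop the link encryption request, wait for LINK_ENCRYPTION_STATUS to clear
 * and, if the shim supports it, turn off HDCP signalling on the port.
 */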
1939static int hdcp2_disable_encryption(struct intel_connector *connector)
1940{
1941	struct intel_display *display = to_intel_display(connector);
1942	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
1943	struct intel_hdcp *hdcp = &connector->hdcp;
1944	enum port port = dig_port->base.port;
1945	enum transcoder cpu_transcoder = hdcp->cpu_transcoder;
1946	int ret;
1947
1948	drm_WARN_ON(display->drm,
1949		    !(intel_de_read(display, HDCP2_STATUS(display, cpu_transcoder, port)) &
1950				    LINK_ENCRYPTION_STATUS));
1951
1952	intel_de_rmw(display, HDCP2_CTL(display, cpu_transcoder, port),
1953		     CTL_LINK_ENCRYPTION_REQ, 0);
1954
1955	ret = intel_de_wait_for_clear(display,
1956				      HDCP2_STATUS(display, cpu_transcoder,
1957						   port),
1958				      LINK_ENCRYPTION_STATUS,
1959				      HDCP_ENCRYPT_STATUS_CHANGE_TIMEOUT_MS);
1960	if (ret == -ETIMEDOUT)
1961		drm_dbg_kms(display->drm, "Disable Encryption Timed out\n");
1962
1963	if (hdcp->shim->toggle_signalling) {
1964		ret = hdcp->shim->toggle_signalling(dig_port, cpu_transcoder,
1965						    false);
1966		if (ret) {
1967			drm_err(display->drm,
1968				"Failed to disable HDCP signalling. %d\n",
1969				ret);
1970			return ret;
1971		}
1972	}
1973
1974	return ret;
1975}
1976
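/*
 * Retry the stream management exchange up to three times for repeaters; bail
 * out so auth can be restarted if seq_num_m rolls over.
 */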
1977static int
1978hdcp2_propagate_stream_management_info(struct intel_connector *connector)
1979{
1980	struct intel_display *display = to_intel_display(connector);
1981	int i, tries = 3, ret;
1982
1983	if (!connector->hdcp.is_repeater)
1984		return 0;
1985
1986	for (i = 0; i < tries; i++) {
1987		ret = _hdcp2_propagate_stream_management_info(connector);
1988		if (!ret)
1989			break;
1990
1991		/* Let's restart the auth in case of seq_num_m roll over */
1992		if (connector->hdcp.seq_num_m > HDCP_2_2_SEQ_NUM_MAX) {
1993			drm_dbg_kms(display->drm,
1994				    "seq_num_m roll over.(%d)\n", ret);
1995			break;
1996		}
1997
1998		drm_dbg_kms(display->drm,
1999			    "HDCP2 stream management %d of %d Failed.(%d)\n",
2000			    i + 1, tries, ret);
2001	}
2002
2003	return ret;
2004}
2005
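/*
 * Full HDCP 2.2 bring-up: authenticate the sink, propagate the stream
 * management info and authenticate the port (retried up to three times,
 * deauthenticating the port between failed attempts), then enable link and
 * stream encryption after the required delay.
 */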
2006static int hdcp2_authenticate_and_encrypt(struct intel_atomic_state *state,
2007					  struct intel_connector *connector)
2008{
2009	struct intel_display *display = to_intel_display(connector);
2010	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
2011	int ret = 0, i, tries = 3;
2012
2013	for (i = 0; i < tries && !dig_port->hdcp_auth_status; i++) {
2014		ret = hdcp2_authenticate_sink(connector);
2015		if (!ret) {
2016			ret = intel_hdcp_prepare_streams(state, connector);
2017			if (ret) {
2018				drm_dbg_kms(display->drm,
2019					    "Prepare stream failed.(%d)\n",
2020					    ret);
2021				break;
2022			}
2023
2024			ret = hdcp2_propagate_stream_management_info(connector);
2025			if (ret) {
2026				drm_dbg_kms(display->drm,
2027					    "Stream management failed.(%d)\n",
2028					    ret);
2029				break;
2030			}
2031
2032			ret = hdcp2_authenticate_port(connector);
2033			if (!ret)
2034				break;
2035			drm_dbg_kms(display->drm, "HDCP2 port auth failed.(%d)\n",
2036				    ret);
2037		}
2038
2039		/* Clearing the mei hdcp session */
2040		drm_dbg_kms(display->drm, "HDCP2.2 Auth %d of %d Failed.(%d)\n",
2041			    i + 1, tries, ret);
2042		if (hdcp2_deauthenticate_port(connector) < 0)
2043			drm_dbg_kms(display->drm, "Port deauth failed.\n");
2044	}
2045
2046	if (!ret && !dig_port->hdcp_auth_status) {
2047		/*
2048		 * Ensure the required 200 msec minimum interval between
2049		 * Session Key Exchange and enabling encryption.
2050		 */
2051		msleep(HDCP_2_2_DELAY_BEFORE_ENCRYPTION_EN);
2052		ret = hdcp2_enable_encryption(connector);
2053		if (ret < 0) {
2054			drm_dbg_kms(display->drm,
2055				    "Encryption Enable Failed.(%d)\n", ret);
2056			if (hdcp2_deauthenticate_port(connector) < 0)
2057				drm_dbg_kms(display->drm, "Port deauth failed.\n");
2058		}
2059	}
2060
2061	if (!ret)
2062		ret = hdcp2_enable_stream_encryption(connector);
2063
2064	return ret;
2065}
2066
2067static int _intel_hdcp2_enable(struct intel_atomic_state *state,
2068			       struct intel_connector *connector)
2069{
2070	struct intel_display *display = to_intel_display(connector);
2071	struct intel_hdcp *hdcp = &connector->hdcp;
2072	int ret;
2073
2074	drm_dbg_kms(display->drm, "[CONNECTOR:%d:%s] HDCP2.2 is being enabled. Type: %d\n",
2075		    connector->base.base.id, connector->base.name,
2076		    hdcp->content_type);
2077
2078	intel_hdcp_disable_hdcp_line_rekeying(connector->encoder, hdcp);
2079
2080	ret = hdcp2_authenticate_and_encrypt(state, connector);
2081	if (ret) {
2082		drm_dbg_kms(display->drm, "HDCP2 Type%d Enabling Failed. (%d)\n",
2083			    hdcp->content_type, ret);
2084		return ret;
2085	}
2086
2087	drm_dbg_kms(display->drm, "[CONNECTOR:%d:%s] HDCP2.2 is enabled. Type %d\n",
2088		    connector->base.base.id, connector->base.name,
2089		    hdcp->content_type);
2090
2091	hdcp->hdcp2_encrypted = true;
2092	return 0;
2093}
2094
2095static int
2096_intel_hdcp2_disable(struct intel_connector *connector, bool hdcp2_link_recovery)
2097{
2098	struct intel_display *display = to_intel_display(connector);
2099	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
2100	struct hdcp_port_data *data = &dig_port->hdcp_port_data;
2101	struct intel_hdcp *hdcp = &connector->hdcp;
2102	int ret;
2103
2104	drm_dbg_kms(display->drm, "[CONNECTOR:%d:%s] HDCP2.2 is being Disabled\n",
2105		    connector->base.base.id, connector->base.name);
2106
2107	if (hdcp->shim->stream_2_2_encryption) {
2108		ret = hdcp->shim->stream_2_2_encryption(connector, false);
2109		if (ret) {
2110			drm_err(display->drm, "[CONNECTOR:%d:%s] Failed to disable HDCP 2.2 stream enc\n",
2111				connector->base.base.id, connector->base.name);
2112			return ret;
2113		}
2114		drm_dbg_kms(display->drm, "HDCP 2.2 transcoder: %s stream encryption disabled\n",
2115			    transcoder_name(hdcp->stream_transcoder));
2116
2117		if (dig_port->num_hdcp_streams > 0 && !hdcp2_link_recovery)
2118			return 0;
2119	}
2120
2121	ret = hdcp2_disable_encryption(connector);
2122
2123	if (hdcp2_deauthenticate_port(connector) < 0)
2124		drm_dbg_kms(display->drm, "Port deauth failed.\n");
2125
2126	connector->hdcp.hdcp2_encrypted = false;
2127	dig_port->hdcp_auth_status = false;
2128	data->k = 0;
2129
2130	return ret;
2131}
2132
2133/* Implements the Link Integrity Check for HDCP2.2 */
2134static int intel_hdcp2_check_link(struct intel_connector *connector)
2135{
2136	struct intel_display *display = to_intel_display(connector);
2137	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
2138	struct intel_hdcp *hdcp = &connector->hdcp;
2139	enum port port = dig_port->base.port;
2140	enum transcoder cpu_transcoder;
2141	int ret = 0;
2142
2143	mutex_lock(&hdcp->mutex);
2144	mutex_lock(&dig_port->hdcp_mutex);
2145	cpu_transcoder = hdcp->cpu_transcoder;
2146
2147	/* hdcp2_check_link is expected only when HDCP2.2 is Enabled */
2148	if (hdcp->value != DRM_MODE_CONTENT_PROTECTION_ENABLED ||
2149	    !hdcp->hdcp2_encrypted) {
2150		ret = -EINVAL;
2151		goto out;
2152	}
2153
2154	if (drm_WARN_ON(display->drm,
2155			!intel_hdcp2_in_use(display, cpu_transcoder, port))) {
2156		drm_err(display->drm,
2157			"HDCP2.2 link stopped the encryption, %x\n",
2158			intel_de_read(display, HDCP2_STATUS(display, cpu_transcoder, port)));
2159		ret = -ENXIO;
2160		_intel_hdcp2_disable(connector, true);
2161		intel_hdcp_update_value(connector,
2162					DRM_MODE_CONTENT_PROTECTION_DESIRED,
2163					true);
2164		goto out;
2165	}
2166
2167	ret = hdcp->shim->check_2_2_link(dig_port, connector);
2168	if (ret == HDCP_LINK_PROTECTED) {
2169		if (hdcp->value != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) {
2170			intel_hdcp_update_value(connector,
2171					DRM_MODE_CONTENT_PROTECTION_ENABLED,
2172					true);
2173		}
2174		goto out;
2175	}
2176
2177	if (ret == HDCP_TOPOLOGY_CHANGE) {
2178		if (hdcp->value == DRM_MODE_CONTENT_PROTECTION_UNDESIRED)
2179			goto out;
2180
2181		drm_dbg_kms(display->drm,
2182			    "HDCP2.2 Downstream topology change\n");
2183
2184		ret = hdcp2_authenticate_repeater_topology(connector);
2185		if (!ret) {
2186			intel_hdcp_update_value(connector,
2187						DRM_MODE_CONTENT_PROTECTION_ENABLED,
2188						true);
2189			goto out;
2190		}
2191
2192		drm_dbg_kms(display->drm,
2193			    "[CONNECTOR:%d:%s] Repeater topology auth failed.(%d)\n",
2194			    connector->base.base.id, connector->base.name,
2195			    ret);
2196	} else {
2197		drm_dbg_kms(display->drm,
2198			    "[CONNECTOR:%d:%s] HDCP2.2 link failed, retrying auth\n",
2199			    connector->base.base.id, connector->base.name);
2200	}
2201
2202	ret = _intel_hdcp2_disable(connector, true);
2203	if (ret) {
2204		drm_err(display->drm,
2205			"[CONNECTOR:%d:%s] Failed to disable hdcp2.2 (%d)\n",
2206			connector->base.base.id, connector->base.name, ret);
2207		intel_hdcp_update_value(connector,
2208				DRM_MODE_CONTENT_PROTECTION_DESIRED, true);
2209		goto out;
2210	}
2211
2212	intel_hdcp_update_value(connector,
2213				DRM_MODE_CONTENT_PROTECTION_DESIRED, true);
2214out:
2215	mutex_unlock(&dig_port->hdcp_mutex);
2216	mutex_unlock(&hdcp->mutex);
2217	return ret;
2218}
2219
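/*
 * Periodic link check: re-queues itself at the HDCP 2.2 or HDCP 1.4 check
 * period depending on which link check succeeds.
 */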
2220static void intel_hdcp_check_work(struct work_struct *work)
2221{
2222	struct intel_hdcp *hdcp = container_of(to_delayed_work(work),
2223					       struct intel_hdcp,
2224					       check_work);
2225	struct intel_connector *connector = intel_hdcp_to_connector(hdcp);
2226	struct intel_display *display = to_intel_display(connector);
2227	struct drm_i915_private *i915 = to_i915(display->drm);
2228
2229	if (drm_connector_is_unregistered(&connector->base))
2230		return;
2231
2232	if (!intel_hdcp2_check_link(connector))
2233		queue_delayed_work(i915->unordered_wq, &hdcp->check_work,
2234				   DRM_HDCP2_CHECK_PERIOD_MS);
2235	else if (!intel_hdcp_check_link(connector))
2236		queue_delayed_work(i915->unordered_wq, &hdcp->check_work,
2237				   DRM_HDCP_CHECK_PERIOD_MS);
2238}
2239
2240static int i915_hdcp_component_bind(struct device *drv_kdev,
2241				    struct device *mei_kdev, void *data)
2242{
2243	struct intel_display *display = to_intel_display(drv_kdev);
2244
2245	drm_dbg(display->drm, "I915 HDCP comp bind\n");
2246	mutex_lock(&display->hdcp.hdcp_mutex);
2247	display->hdcp.arbiter = (struct i915_hdcp_arbiter *)data;
2248	display->hdcp.arbiter->hdcp_dev = mei_kdev;
2249	mutex_unlock(&display->hdcp.hdcp_mutex);
2250
2251	return 0;
2252}
2253
2254static void i915_hdcp_component_unbind(struct device *drv_kdev,
2255				       struct device *mei_kdev, void *data)
2256{
2257	struct intel_display *display = to_intel_display(drv_kdev);
2258
2259	drm_dbg(display->drm, "I915 HDCP comp unbind\n");
2260	mutex_lock(&display->hdcp.hdcp_mutex);
2261	display->hdcp.arbiter = NULL;
2262	mutex_unlock(&display->hdcp.hdcp_mutex);
2263}
2264
2265static const struct component_ops i915_hdcp_ops = {
2266	.bind   = i915_hdcp_component_bind,
2267	.unbind = i915_hdcp_component_unbind,
2268};
2269
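/* Map the DDI port to the hdcp_ddi enum used by the HDCP fw interface. */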
2270static enum hdcp_ddi intel_get_hdcp_ddi_index(enum port port)
2271{
2272	switch (port) {
2273	case PORT_A:
2274		return HDCP_DDI_A;
2275	case PORT_B ... PORT_F:
2276		return (enum hdcp_ddi)port;
2277	default:
2278		return HDCP_DDI_INVALID_PORT;
2279	}
2280}
2281
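/*
 * Map a display transcoder to the hdcp_transcoder enum used by the HDCP fw
 * interface; only transcoders A..D are HDCP capable.
 */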
2282static enum hdcp_transcoder intel_get_hdcp_transcoder(enum transcoder cpu_transcoder)
2283{
2284	switch (cpu_transcoder) {
2285	case TRANSCODER_A ... TRANSCODER_D:
2286		return (enum hdcp_transcoder)(cpu_transcoder | 0x10);
2287	default: /* eDP, DSI TRANSCODERS are non HDCP capable */
2288		return HDCP_INVALID_TRANSCODER;
2289	}
2290}
2291
2292static int initialize_hdcp_port_data(struct intel_connector *connector,
2293				     struct intel_digital_port *dig_port,
2294				     const struct intel_hdcp_shim *shim)
2295{
2296	struct intel_display *display = to_intel_display(connector);
2297	struct hdcp_port_data *data = &dig_port->hdcp_port_data;
2298	enum port port = dig_port->base.port;
2299
2300	if (DISPLAY_VER(display) < 12)
2301		data->hdcp_ddi = intel_get_hdcp_ddi_index(port);
2302	else
2303		/*
2304		 * As per the ME FW API expectation, for GEN 12+, hdcp_ddi is
2305		 * filled with zero (invalid port index).
2306		 */
2307		data->hdcp_ddi = HDCP_DDI_INVALID_PORT;
2308
2309	/*
2310	 * The associated transcoder is set and modified at modeset time, so
2311	 * hdcp_transcoder is initialized to zero (invalid transcoder index)
2312	 * here. For platforms before Gen12 it stays that way.
2313	 */
2314	data->hdcp_transcoder = HDCP_INVALID_TRANSCODER;
2315
2316	data->port_type = (u8)HDCP_PORT_TYPE_INTEGRATED;
2317	data->protocol = (u8)shim->protocol;
2318
2319	if (!data->streams)
2320		data->streams = kcalloc(INTEL_NUM_PIPES(display),
2321					sizeof(struct hdcp2_streamid_type),
2322					GFP_KERNEL);
2323	if (!data->streams) {
2324		drm_err(display->drm, "Out of Memory\n");
2325		return -ENOMEM;
2326	}
2327
2328	return 0;
2329}
2330
2331static bool is_hdcp2_supported(struct intel_display *display)
2332{
2333	struct drm_i915_private *i915 = to_i915(display->drm);
2334
2335	if (intel_hdcp_gsc_cs_required(display))
2336		return true;
2337
2338	if (!IS_ENABLED(CONFIG_INTEL_MEI_HDCP))
2339		return false;
2340
2341	return (DISPLAY_VER(display) >= 10 ||
2342		IS_KABYLAKE(i915) ||
2343		IS_COFFEELAKE(i915) ||
2344		IS_COMETLAKE(i915));
2345}
2346
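/*
 * Register the HDCP fw component: the GSC command streamer interface where
 * required, otherwise the MEI HDCP component.
 */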
2347void intel_hdcp_component_init(struct intel_display *display)
2348{
2349	int ret;
2350
2351	if (!is_hdcp2_supported(display))
2352		return;
2353
2354	mutex_lock(&display->hdcp.hdcp_mutex);
2355	drm_WARN_ON(display->drm, display->hdcp.comp_added);
2356
2357	display->hdcp.comp_added = true;
2358	mutex_unlock(&display->hdcp.hdcp_mutex);
2359	if (intel_hdcp_gsc_cs_required(display))
2360		ret = intel_hdcp_gsc_init(display);
2361	else
2362		ret = component_add_typed(display->drm->dev, &i915_hdcp_ops,
2363					  I915_COMPONENT_HDCP);
2364
2365	if (ret < 0) {
2366		drm_dbg_kms(display->drm, "Failed at fw component add(%d)\n",
2367			    ret);
2368		mutex_lock(&display->hdcp.hdcp_mutex);
2369		display->hdcp.comp_added = false;
2370		mutex_unlock(&display->hdcp.hdcp_mutex);
2371		return;
2372	}
2373}
2374
2375static void intel_hdcp2_init(struct intel_connector *connector,
2376			     struct intel_digital_port *dig_port,
2377			     const struct intel_hdcp_shim *shim)
2378{
2379	struct intel_display *display = to_intel_display(connector);
2380	struct intel_hdcp *hdcp = &connector->hdcp;
2381	int ret;
2382
2383	ret = initialize_hdcp_port_data(connector, dig_port, shim);
2384	if (ret) {
2385		drm_dbg_kms(display->drm, "Mei hdcp data init failed\n");
2386		return;
2387	}
2388
2389	hdcp->hdcp2_supported = true;
2390}
2391
2392int intel_hdcp_init(struct intel_connector *connector,
2393		    struct intel_digital_port *dig_port,
2394		    const struct intel_hdcp_shim *shim)
2395{
2396	struct intel_display *display = to_intel_display(connector);
2397	struct intel_hdcp *hdcp = &connector->hdcp;
2398	int ret;
2399
2400	if (!shim)
2401		return -EINVAL;
2402
2403	if (is_hdcp2_supported(display))
2404		intel_hdcp2_init(connector, dig_port, shim);
2405
2406	ret = drm_connector_attach_content_protection_property(&connector->base,
2407							       hdcp->hdcp2_supported);
2408	if (ret) {
2409		hdcp->hdcp2_supported = false;
2410		kfree(dig_port->hdcp_port_data.streams);
2411		return ret;
2412	}
2413
2414	hdcp->shim = shim;
2415	mutex_init(&hdcp->mutex);
2416	INIT_DELAYED_WORK(&hdcp->check_work, intel_hdcp_check_work);
2417	INIT_WORK(&hdcp->prop_work, intel_hdcp_prop_work);
2418	init_waitqueue_head(&hdcp->cp_irq_queue);
2419
2420	return 0;
2421}
2422
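/*
 * Enable HDCP on the connector: prefer HDCP 2.2 when the sink is capable,
 * fall back to HDCP 1.4 for non-Type1 content, and schedule the periodic
 * link check on success.
 */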
2423static int _intel_hdcp_enable(struct intel_atomic_state *state,
2424			      struct intel_encoder *encoder,
2425			      const struct intel_crtc_state *pipe_config,
2426			      const struct drm_connector_state *conn_state)
2427{
2428	struct intel_display *display = to_intel_display(encoder);
2429	struct drm_i915_private *i915 = to_i915(display->drm);
2430	struct intel_connector *connector =
2431		to_intel_connector(conn_state->connector);
2432	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
2433	struct intel_hdcp *hdcp = &connector->hdcp;
2434	unsigned long check_link_interval = DRM_HDCP_CHECK_PERIOD_MS;
2435	int ret = -EINVAL;
2436
2437	if (!hdcp->shim)
2438		return -ENOENT;
2439
2440	if (!connector->encoder) {
2441		drm_err(display->drm, "[CONNECTOR:%d:%s] encoder is not initialized\n",
2442			connector->base.base.id, connector->base.name);
2443		return -ENODEV;
2444	}
2445
2446	mutex_lock(&hdcp->mutex);
2447	mutex_lock(&dig_port->hdcp_mutex);
2448	drm_WARN_ON(display->drm,
2449		    hdcp->value == DRM_MODE_CONTENT_PROTECTION_ENABLED);
2450	hdcp->content_type = (u8)conn_state->hdcp_content_type;
2451
2452	if (intel_crtc_has_type(pipe_config, INTEL_OUTPUT_DP_MST)) {
2453		hdcp->cpu_transcoder = pipe_config->mst_master_transcoder;
2454		hdcp->stream_transcoder = pipe_config->cpu_transcoder;
2455	} else {
2456		hdcp->cpu_transcoder = pipe_config->cpu_transcoder;
2457		hdcp->stream_transcoder = INVALID_TRANSCODER;
2458	}
2459
2460	if (DISPLAY_VER(display) >= 12)
2461		dig_port->hdcp_port_data.hdcp_transcoder =
2462			intel_get_hdcp_transcoder(hdcp->cpu_transcoder);
2463
2464	/*
2465	 * HDCP 2.2 is considered more secure than HDCP 1.4, so if the setup
2466	 * is HDCP 2.2 capable, prefer HDCP 2.2.
2467	 */
2468	if (intel_hdcp2_get_capability(connector)) {
2469		ret = _intel_hdcp2_enable(state, connector);
2470		if (!ret)
2471			check_link_interval =
2472				DRM_HDCP2_CHECK_PERIOD_MS;
2473	}
2474
2475	/*
2476	 * When HDCP2.2 fails and Content Type is not Type1, HDCP1.4 will
2477	 * be attempted.
2478	 */
2479	if (ret && intel_hdcp_get_capability(connector) &&
2480	    hdcp->content_type != DRM_MODE_HDCP_CONTENT_TYPE1) {
2481		ret = intel_hdcp1_enable(connector);
2482	}
2483
2484	if (!ret) {
2485		queue_delayed_work(i915->unordered_wq, &hdcp->check_work,
2486				   check_link_interval);
2487		intel_hdcp_update_value(connector,
2488					DRM_MODE_CONTENT_PROTECTION_ENABLED,
2489					true);
2490	}
2491
2492	mutex_unlock(&dig_port->hdcp_mutex);
2493	mutex_unlock(&hdcp->mutex);
2494	return ret;
2495}
2496
2497void intel_hdcp_enable(struct intel_atomic_state *state,
2498		       struct intel_encoder *encoder,
2499		       const struct intel_crtc_state *crtc_state,
2500		       const struct drm_connector_state *conn_state)
2501{
2502	struct intel_connector *connector =
2503		to_intel_connector(conn_state->connector);
2504	struct intel_hdcp *hdcp = &connector->hdcp;
2505
2506	/*
2507	 * Enable HDCP if it's desired, or if userspace enabled it but the
2508	 * driver had set its state to undesired.
2509	 */
2510	if (conn_state->content_protection ==
2511	    DRM_MODE_CONTENT_PROTECTION_DESIRED ||
2512	    (conn_state->content_protection ==
2513	    DRM_MODE_CONTENT_PROTECTION_ENABLED && hdcp->value ==
2514	    DRM_MODE_CONTENT_PROTECTION_UNDESIRED))
2515		_intel_hdcp_enable(state, encoder, crtc_state, conn_state);
2516}
2517
2518int intel_hdcp_disable(struct intel_connector *connector)
2519{
2520	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
2521	struct intel_hdcp *hdcp = &connector->hdcp;
2522	int ret = 0;
2523
2524	if (!hdcp->shim)
2525		return -ENOENT;
2526
2527	mutex_lock(&hdcp->mutex);
2528	mutex_lock(&dig_port->hdcp_mutex);
2529
2530	if (hdcp->value == DRM_MODE_CONTENT_PROTECTION_UNDESIRED)
2531		goto out;
2532
2533	intel_hdcp_update_value(connector,
2534				DRM_MODE_CONTENT_PROTECTION_UNDESIRED, false);
2535	if (hdcp->hdcp2_encrypted)
2536		ret = _intel_hdcp2_disable(connector, false);
2537	else if (hdcp->hdcp_encrypted)
2538		ret = _intel_hdcp_disable(connector);
2539
2540out:
2541	mutex_unlock(&dig_port->hdcp_mutex);
2542	mutex_unlock(&hdcp->mutex);
2543	cancel_delayed_work_sync(&hdcp->check_work);
2544	return ret;
2545}
2546
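/*
 * Re-evaluate HDCP during an atomic update: handle UNDESIRED and content
 * type changes by disabling first, then (re)enable when the property is
 * DESIRED and HDCP is not already active.
 */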
2547void intel_hdcp_update_pipe(struct intel_atomic_state *state,
2548			    struct intel_encoder *encoder,
2549			    const struct intel_crtc_state *crtc_state,
2550			    const struct drm_connector_state *conn_state)
2551{
2552	struct intel_connector *connector =
2553				to_intel_connector(conn_state->connector);
2554	struct intel_hdcp *hdcp = &connector->hdcp;
2555	bool content_protection_type_changed, desired_and_not_enabled = false;
2556	struct drm_i915_private *i915 = to_i915(connector->base.dev);
2557
2558	if (!connector->hdcp.shim)
2559		return;
2560
2561	content_protection_type_changed =
2562		(conn_state->hdcp_content_type != hdcp->content_type &&
2563		 conn_state->content_protection !=
2564		 DRM_MODE_CONTENT_PROTECTION_UNDESIRED);
2565
2566	/*
2567	 * If a content type change is requested during an HDCP encryption
2568	 * session, disable HDCP and re-enable it with the new type value.
2569	 */
2570	if (conn_state->content_protection ==
2571	    DRM_MODE_CONTENT_PROTECTION_UNDESIRED ||
2572	    content_protection_type_changed)
2573		intel_hdcp_disable(connector);
2574
2575	/*
2576	 * Mark the hdcp state as DESIRED after the disable done for the
2577	 * content type change procedure.
2578	 */
2579	if (content_protection_type_changed) {
2580		mutex_lock(&hdcp->mutex);
2581		hdcp->value = DRM_MODE_CONTENT_PROTECTION_DESIRED;
2582		drm_connector_get(&connector->base);
2583		if (!queue_work(i915->unordered_wq, &hdcp->prop_work))
2584			drm_connector_put(&connector->base);
2585		mutex_unlock(&hdcp->mutex);
2586	}
2587
2588	if (conn_state->content_protection ==
2589	    DRM_MODE_CONTENT_PROTECTION_DESIRED) {
2590		mutex_lock(&hdcp->mutex);
2591		/* Avoid enabling hdcp if it is already ENABLED */
2592		desired_and_not_enabled =
2593			hdcp->value != DRM_MODE_CONTENT_PROTECTION_ENABLED;
2594		mutex_unlock(&hdcp->mutex);
2595		/*
2596		 * If HDCP is already ENABLED and the CP property is DESIRED,
2597		 * schedule prop_work to report the correct value to userspace.
2598		 */
2599		if (!desired_and_not_enabled && !content_protection_type_changed) {
2600			drm_connector_get(&connector->base);
2601			if (!queue_work(i915->unordered_wq, &hdcp->prop_work))
2602				drm_connector_put(&connector->base);
2603
2604		}
2605	}
2606
2607	if (desired_and_not_enabled || content_protection_type_changed)
2608		_intel_hdcp_enable(state, encoder, crtc_state, conn_state);
2609}
2610
2611void intel_hdcp_component_fini(struct intel_display *display)
2612{
2613	mutex_lock(&display->hdcp.hdcp_mutex);
2614	if (!display->hdcp.comp_added) {
2615		mutex_unlock(&display->hdcp.hdcp_mutex);
2616		return;
2617	}
2618
2619	display->hdcp.comp_added = false;
2620	mutex_unlock(&display->hdcp.hdcp_mutex);
2621
2622	if (intel_hdcp_gsc_cs_required(display))
2623		intel_hdcp_gsc_fini(display);
2624	else
2625		component_del(display->drm->dev, &i915_hdcp_ops);
2626}
2627
2628void intel_hdcp_cleanup(struct intel_connector *connector)
2629{
2630	struct intel_hdcp *hdcp = &connector->hdcp;
2631
2632	if (!hdcp->shim)
2633		return;
2634
2635	/*
2636	 * If the connector is registered, it's possible userspace could kick
2637	 * off another HDCP enable, which would re-spawn the workers.
2638	 */
2639	drm_WARN_ON(connector->base.dev,
2640		connector->base.registration_state == DRM_CONNECTOR_REGISTERED);
2641
2642	/*
2643	 * Now that the connector is not registered, check_work won't be run,
2644	 * but cancel any outstanding instances of it
2645	 */
2646	cancel_delayed_work_sync(&hdcp->check_work);
2647
2648	/*
2649	 * We don't cancel prop_work in the same way as check_work since it
2650	 * requires connection_mutex which could be held while calling this
2651	 * function. Instead, we rely on the connector references grabbed before
2652	 * scheduling prop_work to ensure the connector is alive when prop_work
2653	 * is run. So if we're in the destroy path (which is where this
2654	 * function should be called), we're "guaranteed" that prop_work is not
2655	 * active (tl;dr This Should Never Happen).
2656	 */
2657	drm_WARN_ON(connector->base.dev, work_pending(&hdcp->prop_work));
2658
2659	mutex_lock(&hdcp->mutex);
2660	hdcp->shim = NULL;
2661	mutex_unlock(&hdcp->mutex);
2662}
2663
2664void intel_hdcp_atomic_check(struct drm_connector *connector,
2665			     struct drm_connector_state *old_state,
2666			     struct drm_connector_state *new_state)
2667{
2668	u64 old_cp = old_state->content_protection;
2669	u64 new_cp = new_state->content_protection;
2670	struct drm_crtc_state *crtc_state;
2671
2672	if (!new_state->crtc) {
2673		/*
2674		 * If the connector is being disabled with CP enabled, mark it
2675		 * desired so it's re-enabled when the connector is brought back
2676		 */
2677		if (old_cp == DRM_MODE_CONTENT_PROTECTION_ENABLED)
2678			new_state->content_protection =
2679				DRM_MODE_CONTENT_PROTECTION_DESIRED;
2680		return;
2681	}
2682
2683	crtc_state = drm_atomic_get_new_crtc_state(new_state->state,
2684						   new_state->crtc);
2685	/*
2686	 * Fix the HDCP uapi content protection state in case of modeset.
2687	 * FIXME: As per the HDCP content protection property uapi doc, a
2688	 * uevent() needs to be sent on a transition from ENABLED->DESIRED.
2689	 */
2690	if (drm_atomic_crtc_needs_modeset(crtc_state) &&
2691	    (old_cp == DRM_MODE_CONTENT_PROTECTION_ENABLED &&
2692	    new_cp != DRM_MODE_CONTENT_PROTECTION_UNDESIRED))
2693		new_state->content_protection =
2694			DRM_MODE_CONTENT_PROTECTION_DESIRED;
2695
2696	/*
2697	 * Nothing to do if the state didn't change, or if HDCP was activated
2698	 * since the last commit, and the hdcp content type didn't change either.
2699	 */
2700	if (old_cp == new_cp ||
2701	    (old_cp == DRM_MODE_CONTENT_PROTECTION_DESIRED &&
2702	     new_cp == DRM_MODE_CONTENT_PROTECTION_ENABLED)) {
2703		if (old_state->hdcp_content_type ==
2704				new_state->hdcp_content_type)
2705			return;
2706	}
2707
2708	crtc_state->mode_changed = true;
2709}
2710
2711/* Handles the CP_IRQ raised from the DP HDCP sink */
2712void intel_hdcp_handle_cp_irq(struct intel_connector *connector)
2713{
2714	struct intel_hdcp *hdcp = &connector->hdcp;
2715	struct intel_display *display = to_intel_display(connector);
2716	struct drm_i915_private *i915 = to_i915(display->drm);
2717
2718	if (!hdcp->shim)
2719		return;
2720
2721	atomic_inc(&connector->hdcp.cp_irq_count);
2722	wake_up_all(&connector->hdcp.cp_irq_queue);
2723
2724	queue_delayed_work(i915->unordered_wq, &hdcp->check_work, 0);
2725}