Linux Audio

Check our new training course

Loading...
Note: File does not exist in v4.6.
   1/* SPDX-License-Identifier: MIT */
   2/*
   3 * Copyright (C) 2017 Google, Inc.
 * Copyright (C) 2017-2019, Intel Corporation.
   5 *
   6 * Authors:
   7 * Sean Paul <seanpaul@chromium.org>
   8 * Ramalingam C <ramalingam.c@intel.com>
   9 */
  10
  11#include <linux/component.h>
  12#include <linux/i2c.h>
  13#include <linux/random.h>
  14
  15#include <drm/drm_hdcp.h>
  16#include <drm/i915_component.h>
  17
  18#include "i915_drv.h"
  19#include "i915_reg.h"
  20#include "intel_display_power.h"
  21#include "intel_de.h"
  22#include "intel_display_types.h"
  23#include "intel_hdcp.h"
  24#include "intel_sideband.h"
  25#include "intel_connector.h"
  26
  27#define KEY_LOAD_TRIES	5
  28#define HDCP2_LC_RETRY_CNT			3
  29
  30static int intel_conn_to_vcpi(struct intel_connector *connector)
  31{
  32	/* For HDMI this is forced to be 0x0. For DP SST also this is 0x0. */
  33	return connector->port	? connector->port->vcpi.vcpi : 0;
  34}
  35
  36static bool
  37intel_streams_type1_capable(struct intel_connector *connector)
  38{
  39	const struct intel_hdcp_shim *shim = connector->hdcp.shim;
  40	bool capable = false;
  41
  42	if (!shim)
  43		return capable;
  44
  45	if (shim->streams_type1_capable)
  46		shim->streams_type1_capable(connector, &capable);
  47
  48	return capable;
  49}
  50
/*
 * intel_hdcp_required_content_stream selects the highest common possible HDCP
 * content_type for all streams in a DP MST topology, because the security f/w
 * doesn't have any provision to mark content_type for each stream separately;
 * it marks all available streams with the content_type provided at the time of
 * port authentication. This may prohibit userspace from using type1 content on
 * an HDCP 2.2 capable sink because other sinks in the DP MST topology are not
 * capable of HDCP 2.2. Though it is not compulsory, security fw should change
 * its policy to mark different content_types for different streams.
 *
 * Populates dig_port->hdcp_port_data with one entry per active stream on this
 * port. Returns 0 on success, -EINVAL when the collected stream count is
 * implausible (0 or more than the pipe count).
 */
static int
intel_hdcp_required_content_stream(struct intel_digital_port *dig_port)
{
	struct drm_connector_list_iter conn_iter;
	struct intel_digital_port *conn_dig_port;
	struct intel_connector *connector;
	struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
	struct hdcp_port_data *data = &dig_port->hdcp_port_data;
	bool enforce_type0 = false;
	int k;

	data->k = 0;

	/* Port already authenticated: stream data is already in place. */
	if (dig_port->hdcp_auth_status)
		return 0;

	drm_connector_list_iter_begin(&i915->drm, &conn_iter);
	for_each_intel_connector_iter(connector, &conn_iter) {
		if (connector->base.status == connector_status_disconnected)
			continue;

		if (!intel_encoder_is_mst(intel_attached_encoder(connector)))
			continue;

		/* Only consider streams that share this physical port. */
		conn_dig_port = intel_attached_dig_port(connector);
		if (conn_dig_port != dig_port)
			continue;

		/* One non-type1-capable sink downgrades the whole topology. */
		if (!enforce_type0 && !intel_streams_type1_capable(connector))
			enforce_type0 = true;

		data->streams[data->k].stream_id = intel_conn_to_vcpi(connector);
		data->k++;

		/* if there is only one active stream */
		if (dig_port->dp.active_mst_links <= 1)
			break;
	}
	drm_connector_list_iter_end(&conn_iter);

	if (drm_WARN_ON(&i915->drm, data->k > INTEL_NUM_PIPES(i915) || data->k == 0))
		return -EINVAL;

	/*
	 * Apply common protection level across all streams in DP MST Topology.
	 * Use highest supported content type for all streams in DP MST Topology.
	 */
	for (k = 0; k < data->k; k++)
		data->streams[k].stream_type =
			enforce_type0 ? DRM_MODE_HDCP_CONTENT_TYPE0 : DRM_MODE_HDCP_CONTENT_TYPE1;

	return 0;
}
 114
 115static
 116bool intel_hdcp_is_ksv_valid(u8 *ksv)
 117{
 118	int i, ones = 0;
 119	/* KSV has 20 1's and 20 0's */
 120	for (i = 0; i < DRM_HDCP_KSV_LEN; i++)
 121		ones += hweight8(ksv[i]);
 122	if (ones != 20)
 123		return false;
 124
 125	return true;
 126}
 127
 128static
 129int intel_hdcp_read_valid_bksv(struct intel_digital_port *dig_port,
 130			       const struct intel_hdcp_shim *shim, u8 *bksv)
 131{
 132	struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
 133	int ret, i, tries = 2;
 134
 135	/* HDCP spec states that we must retry the bksv if it is invalid */
 136	for (i = 0; i < tries; i++) {
 137		ret = shim->read_bksv(dig_port, bksv);
 138		if (ret)
 139			return ret;
 140		if (intel_hdcp_is_ksv_valid(bksv))
 141			break;
 142	}
 143	if (i == tries) {
 144		drm_dbg_kms(&i915->drm, "Bksv is invalid\n");
 145		return -ENODEV;
 146	}
 147
 148	return 0;
 149}
 150
 151/* Is HDCP1.4 capable on Platform and Sink */
 152bool intel_hdcp_capable(struct intel_connector *connector)
 153{
 154	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
 155	const struct intel_hdcp_shim *shim = connector->hdcp.shim;
 156	bool capable = false;
 157	u8 bksv[5];
 158
 159	if (!shim)
 160		return capable;
 161
 162	if (shim->hdcp_capable) {
 163		shim->hdcp_capable(dig_port, &capable);
 164	} else {
 165		if (!intel_hdcp_read_valid_bksv(dig_port, shim, bksv))
 166			capable = true;
 167	}
 168
 169	return capable;
 170}
 171
/* Is HDCP2.2 capable on Platform and Sink */
bool intel_hdcp2_capable(struct intel_connector *connector)
{
	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
	struct intel_hdcp *hdcp = &connector->hdcp;
	bool capable = false;

	/* I915 support for HDCP2.2 */
	if (!hdcp->hdcp2_supported)
		return false;

	/* MEI interface is solid */
	mutex_lock(&dev_priv->hdcp_comp_mutex);
	/* Both the component registration and the master binding must exist. */
	if (!dev_priv->hdcp_comp_added ||  !dev_priv->hdcp_master) {
		mutex_unlock(&dev_priv->hdcp_comp_mutex);
		return false;
	}
	mutex_unlock(&dev_priv->hdcp_comp_mutex);

	/* Sink's capability for HDCP2.2 */
	hdcp->shim->hdcp_2_2_capable(dig_port, &capable);

	return capable;
}
 197
 198static bool intel_hdcp_in_use(struct drm_i915_private *dev_priv,
 199			      enum transcoder cpu_transcoder, enum port port)
 200{
 201	return intel_de_read(dev_priv,
 202	                     HDCP_STATUS(dev_priv, cpu_transcoder, port)) &
 203	       HDCP_STATUS_ENC;
 204}
 205
 206static bool intel_hdcp2_in_use(struct drm_i915_private *dev_priv,
 207			       enum transcoder cpu_transcoder, enum port port)
 208{
 209	return intel_de_read(dev_priv,
 210	                     HDCP2_STATUS(dev_priv, cpu_transcoder, port)) &
 211	       LINK_ENCRYPTION_STATUS;
 212}
 213
/*
 * Poll the sink's KSV FIFO ready flag via the shim until it is set.
 * Returns 0 when ready, a shim read error, -ETIMEDOUT otherwise.
 */
static int intel_hdcp_poll_ksv_fifo(struct intel_digital_port *dig_port,
				    const struct intel_hdcp_shim *shim)
{
	int ret, read_ret;
	bool ksv_ready;

	/* Poll for ksv list ready (spec says max time allowed is 5s) */
	/* 5s total budget, 1ms initial interval backing off to 100ms. */
	ret = __wait_for(read_ret = shim->read_ksv_ready(dig_port,
							 &ksv_ready),
			 read_ret || ksv_ready, 5 * 1000 * 1000, 1000,
			 100 * 1000);
	if (ret)
		return ret;
	/* The wait may have ended on a read failure rather than readiness. */
	if (read_ret)
		return read_ret;
	if (!ksv_ready)
		return -ETIMEDOUT;

	return 0;
}
 234
 235static bool hdcp_key_loadable(struct drm_i915_private *dev_priv)
 236{
 237	enum i915_power_well_id id;
 238	intel_wakeref_t wakeref;
 239	bool enabled = false;
 240
 241	/*
 242	 * On HSW and BDW, Display HW loads the Key as soon as Display resumes.
 243	 * On all BXT+, SW can load the keys only when the PW#1 is turned on.
 244	 */
 245	if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
 246		id = HSW_DISP_PW_GLOBAL;
 247	else
 248		id = SKL_DISP_PW_1;
 249
 250	/* PG1 (power well #1) needs to be enabled */
 251	with_intel_runtime_pm(&dev_priv->runtime_pm, wakeref)
 252		enabled = intel_display_power_well_is_enabled(dev_priv, id);
 253
 254	/*
 255	 * Another req for hdcp key loadability is enabled state of pll for
 256	 * cdclk. Without active crtc we wont land here. So we are assuming that
 257	 * cdclk is already on.
 258	 */
 259
 260	return enabled;
 261}
 262
/* Wipe the loaded HDCP keys and clear all key/fuse status bits. */
static void intel_hdcp_clear_keys(struct drm_i915_private *dev_priv)
{
	intel_de_write(dev_priv, HDCP_KEY_CONF, HDCP_CLEAR_KEYS_TRIGGER);
	/* Write 1 to each status bit to clear it before the next load try. */
	intel_de_write(dev_priv, HDCP_KEY_STATUS,
		       HDCP_KEY_LOAD_DONE | HDCP_KEY_LOAD_STATUS | HDCP_FUSE_IN_PROGRESS | HDCP_FUSE_ERROR | HDCP_FUSE_DONE);
}
 269
/*
 * Trigger the platform-appropriate HDCP 1.4 key load and wait for the
 * hardware to report the keys loaded. Returns 0 on success, -ENXIO when
 * the keys cannot be (or were not) loaded, or a pcode/wait error.
 */
static int intel_hdcp_load_keys(struct drm_i915_private *dev_priv)
{
	int ret;
	u32 val;

	/* Nothing to do if the keys are already loaded and valid. */
	val = intel_de_read(dev_priv, HDCP_KEY_STATUS);
	if ((val & HDCP_KEY_LOAD_DONE) && (val & HDCP_KEY_LOAD_STATUS))
		return 0;

	/*
	 * On HSW and BDW HW loads the HDCP1.4 Key when Display comes
	 * out of reset. So if Key is not already loaded, its an error state.
	 */
	if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
		if (!(intel_de_read(dev_priv, HDCP_KEY_STATUS) & HDCP_KEY_LOAD_DONE))
			return -ENXIO;

	/*
	 * Initiate loading the HDCP key from fuses.
	 *
	 * BXT+ platforms, HDCP key needs to be loaded by SW. Only display
	 * version 9 platforms (minus BXT) differ in the key load trigger
	 * process from other platforms. These platforms use the GT Driver
	 * Mailbox interface.
	 */
	if (DISPLAY_VER(dev_priv) == 9 && !IS_BROXTON(dev_priv)) {
		ret = sandybridge_pcode_write(dev_priv,
					      SKL_PCODE_LOAD_HDCP_KEYS, 1);
		if (ret) {
			drm_err(&dev_priv->drm,
				"Failed to initiate HDCP key load (%d)\n",
				ret);
			return ret;
		}
	} else {
		intel_de_write(dev_priv, HDCP_KEY_CONF, HDCP_KEY_LOAD_TRIGGER);
	}

	/* Wait for the keys to load (500us) */
	ret = __intel_wait_for_register(&dev_priv->uncore, HDCP_KEY_STATUS,
					HDCP_KEY_LOAD_DONE, HDCP_KEY_LOAD_DONE,
					10, 1, &val);
	if (ret)
		return ret;
	else if (!(val & HDCP_KEY_LOAD_STATUS))
		return -ENXIO;

	/* Send Aksv over to PCH display for use in authentication */
	intel_de_write(dev_priv, HDCP_KEY_CONF, HDCP_AKSV_SEND_TRIGGER);

	return 0;
}
 322
 323/* Returns updated SHA-1 index */
 324static int intel_write_sha_text(struct drm_i915_private *dev_priv, u32 sha_text)
 325{
 326	intel_de_write(dev_priv, HDCP_SHA_TEXT, sha_text);
 327	if (intel_de_wait_for_set(dev_priv, HDCP_REP_CTL, HDCP_SHA1_READY, 1)) {
 328		drm_err(&dev_priv->drm, "Timed out waiting for SHA1 ready\n");
 329		return -ETIMEDOUT;
 330	}
 331	return 0;
 332}
 333
/*
 * Return the HDCP_REP_CTL repeater-present/SHA1-M0 bits for the given
 * transcoder (display version 12+) or DDI port (older platforms).
 *
 * NOTE(review): on an unknown transcoder/port this returns -EINVAL through
 * a u32, i.e. 0xffffffea — callers treat the value as a register bitmask,
 * so the error is effectively a sentinel, not a negative errno.
 */
static
u32 intel_hdcp_get_repeater_ctl(struct drm_i915_private *dev_priv,
				enum transcoder cpu_transcoder, enum port port)
{
	/* Display 12+ selects by transcoder rather than by DDI port. */
	if (DISPLAY_VER(dev_priv) >= 12) {
		switch (cpu_transcoder) {
		case TRANSCODER_A:
			return HDCP_TRANSA_REP_PRESENT |
			       HDCP_TRANSA_SHA1_M0;
		case TRANSCODER_B:
			return HDCP_TRANSB_REP_PRESENT |
			       HDCP_TRANSB_SHA1_M0;
		case TRANSCODER_C:
			return HDCP_TRANSC_REP_PRESENT |
			       HDCP_TRANSC_SHA1_M0;
		case TRANSCODER_D:
			return HDCP_TRANSD_REP_PRESENT |
			       HDCP_TRANSD_SHA1_M0;
		default:
			drm_err(&dev_priv->drm, "Unknown transcoder %d\n",
				cpu_transcoder);
			return -EINVAL;
		}
	}

	switch (port) {
	case PORT_A:
		return HDCP_DDIA_REP_PRESENT | HDCP_DDIA_SHA1_M0;
	case PORT_B:
		return HDCP_DDIB_REP_PRESENT | HDCP_DDIB_SHA1_M0;
	case PORT_C:
		return HDCP_DDIC_REP_PRESENT | HDCP_DDIC_SHA1_M0;
	case PORT_D:
		return HDCP_DDID_REP_PRESENT | HDCP_DDID_SHA1_M0;
	case PORT_E:
		return HDCP_DDIE_REP_PRESENT | HDCP_DDIE_SHA1_M0;
	default:
		drm_err(&dev_priv->drm, "Unknown port %d\n", port);
		return -EINVAL;
	}
}
 375
/*
 * Validate the repeater's V' against the hardware-computed SHA-1.
 *
 * Feeds the receiver's V' parts into HDCP_SHA_V_PRIME, then streams the
 * concatenated downstream KSV list, BINFO/BSTATUS and M0 through the
 * HDCP_SHA_TEXT interface so the hardware can compute and compare V.
 * Returns 0 on match, -ETIMEDOUT on hardware stalls, -ENXIO on SHA-1
 * mismatch, -EINVAL on internal bookkeeping errors, or a shim read error.
 */
static
int intel_hdcp_validate_v_prime(struct intel_connector *connector,
				const struct intel_hdcp_shim *shim,
				u8 *ksv_fifo, u8 num_downstream, u8 *bstatus)
{
	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
	enum transcoder cpu_transcoder = connector->hdcp.cpu_transcoder;
	enum port port = dig_port->base.port;
	u32 vprime, sha_text, sha_leftovers, rep_ctl;
	int ret, i, j, sha_idx;

	/* Process V' values from the receiver */
	for (i = 0; i < DRM_HDCP_V_PRIME_NUM_PARTS; i++) {
		ret = shim->read_v_prime_part(dig_port, i, &vprime);
		if (ret)
			return ret;
		intel_de_write(dev_priv, HDCP_SHA_V_PRIME(i), vprime);
	}

	/*
	 * We need to write the concatenation of all device KSVs, BINFO (DP) ||
	 * BSTATUS (HDMI), and M0 (which is added via HDCP_REP_CTL). This byte
	 * stream is written via the HDCP_SHA_TEXT register in 32-bit
	 * increments. Every 64 bytes, we need to write HDCP_REP_CTL again. This
	 * index will keep track of our progress through the 64 bytes as well as
	 * helping us work the 40-bit KSVs through our 32-bit register.
	 *
	 * NOTE: data passed via HDCP_SHA_TEXT should be big-endian
	 */
	sha_idx = 0;
	sha_text = 0;
	sha_leftovers = 0;
	rep_ctl = intel_hdcp_get_repeater_ctl(dev_priv, cpu_transcoder, port);
	intel_de_write(dev_priv, HDCP_REP_CTL, rep_ctl | HDCP_SHA1_TEXT_32);
	for (i = 0; i < num_downstream; i++) {
		unsigned int sha_empty;
		u8 *ksv = &ksv_fifo[i * DRM_HDCP_KSV_LEN];

		/* Fill up the empty slots in sha_text and write it out */
		sha_empty = sizeof(sha_text) - sha_leftovers;
		for (j = 0; j < sha_empty; j++) {
			u8 off = ((sizeof(sha_text) - j - 1 - sha_leftovers) * 8);
			sha_text |= ksv[j] << off;
		}

		ret = intel_write_sha_text(dev_priv, sha_text);
		if (ret < 0)
			return ret;

		/* Programming guide writes this every 64 bytes */
		sha_idx += sizeof(sha_text);
		if (!(sha_idx % 64))
			intel_de_write(dev_priv, HDCP_REP_CTL,
				       rep_ctl | HDCP_SHA1_TEXT_32);

		/* Store the leftover bytes from the ksv in sha_text */
		sha_leftovers = DRM_HDCP_KSV_LEN - sha_empty;
		sha_text = 0;
		for (j = 0; j < sha_leftovers; j++)
			sha_text |= ksv[sha_empty + j] <<
					((sizeof(sha_text) - j - 1) * 8);

		/*
		 * If we still have room in sha_text for more data, continue.
		 * Otherwise, write it out immediately.
		 */
		if (sizeof(sha_text) > sha_leftovers)
			continue;

		ret = intel_write_sha_text(dev_priv, sha_text);
		if (ret < 0)
			return ret;
		sha_leftovers = 0;
		sha_text = 0;
		sha_idx += sizeof(sha_text);
	}

	/*
	 * We need to write BINFO/BSTATUS, and M0 now. Depending on how many
	 * bytes are leftover from the last ksv, we might be able to fit them
	 * all in sha_text (first 2 cases), or we might need to split them up
	 * into 2 writes (last 2 cases).
	 */
	if (sha_leftovers == 0) {
		/* Write 16 bits of text, 16 bits of M0 */
		intel_de_write(dev_priv, HDCP_REP_CTL,
			       rep_ctl | HDCP_SHA1_TEXT_16);
		ret = intel_write_sha_text(dev_priv,
					   bstatus[0] << 8 | bstatus[1]);
		if (ret < 0)
			return ret;
		sha_idx += sizeof(sha_text);

		/* Write 32 bits of M0 */
		intel_de_write(dev_priv, HDCP_REP_CTL,
			       rep_ctl | HDCP_SHA1_TEXT_0);
		ret = intel_write_sha_text(dev_priv, 0);
		if (ret < 0)
			return ret;
		sha_idx += sizeof(sha_text);

		/* Write 16 bits of M0 */
		intel_de_write(dev_priv, HDCP_REP_CTL,
			       rep_ctl | HDCP_SHA1_TEXT_16);
		ret = intel_write_sha_text(dev_priv, 0);
		if (ret < 0)
			return ret;
		sha_idx += sizeof(sha_text);

	} else if (sha_leftovers == 1) {
		/* Write 24 bits of text, 8 bits of M0 */
		intel_de_write(dev_priv, HDCP_REP_CTL,
			       rep_ctl | HDCP_SHA1_TEXT_24);
		sha_text |= bstatus[0] << 16 | bstatus[1] << 8;
		/* Only 24-bits of data, must be in the LSB */
		sha_text = (sha_text & 0xffffff00) >> 8;
		ret = intel_write_sha_text(dev_priv, sha_text);
		if (ret < 0)
			return ret;
		sha_idx += sizeof(sha_text);

		/* Write 32 bits of M0 */
		intel_de_write(dev_priv, HDCP_REP_CTL,
			       rep_ctl | HDCP_SHA1_TEXT_0);
		ret = intel_write_sha_text(dev_priv, 0);
		if (ret < 0)
			return ret;
		sha_idx += sizeof(sha_text);

		/* Write 24 bits of M0 */
		intel_de_write(dev_priv, HDCP_REP_CTL,
			       rep_ctl | HDCP_SHA1_TEXT_8);
		ret = intel_write_sha_text(dev_priv, 0);
		if (ret < 0)
			return ret;
		sha_idx += sizeof(sha_text);

	} else if (sha_leftovers == 2) {
		/* Write 32 bits of text */
		intel_de_write(dev_priv, HDCP_REP_CTL,
			       rep_ctl | HDCP_SHA1_TEXT_32);
		sha_text |= bstatus[0] << 8 | bstatus[1];
		ret = intel_write_sha_text(dev_priv, sha_text);
		if (ret < 0)
			return ret;
		sha_idx += sizeof(sha_text);

		/* Write 64 bits of M0 */
		intel_de_write(dev_priv, HDCP_REP_CTL,
			       rep_ctl | HDCP_SHA1_TEXT_0);
		for (i = 0; i < 2; i++) {
			ret = intel_write_sha_text(dev_priv, 0);
			if (ret < 0)
				return ret;
			sha_idx += sizeof(sha_text);
		}

		/*
		 * Terminate the SHA-1 stream by hand. For the other leftover
		 * cases this is appended by the hardware.
		 */
		intel_de_write(dev_priv, HDCP_REP_CTL,
			       rep_ctl | HDCP_SHA1_TEXT_32);
		sha_text = DRM_HDCP_SHA1_TERMINATOR << 24;
		ret = intel_write_sha_text(dev_priv, sha_text);
		if (ret < 0)
			return ret;
		sha_idx += sizeof(sha_text);
	} else if (sha_leftovers == 3) {
		/* Write 32 bits of text (filled from LSB) */
		intel_de_write(dev_priv, HDCP_REP_CTL,
			       rep_ctl | HDCP_SHA1_TEXT_32);
		sha_text |= bstatus[0];
		ret = intel_write_sha_text(dev_priv, sha_text);
		if (ret < 0)
			return ret;
		sha_idx += sizeof(sha_text);

		/* Write 8 bits of text (filled from LSB), 24 bits of M0 */
		intel_de_write(dev_priv, HDCP_REP_CTL,
			       rep_ctl | HDCP_SHA1_TEXT_8);
		ret = intel_write_sha_text(dev_priv, bstatus[1]);
		if (ret < 0)
			return ret;
		sha_idx += sizeof(sha_text);

		/* Write 32 bits of M0 */
		intel_de_write(dev_priv, HDCP_REP_CTL,
			       rep_ctl | HDCP_SHA1_TEXT_0);
		ret = intel_write_sha_text(dev_priv, 0);
		if (ret < 0)
			return ret;
		sha_idx += sizeof(sha_text);

		/* Write 8 bits of M0 */
		intel_de_write(dev_priv, HDCP_REP_CTL,
			       rep_ctl | HDCP_SHA1_TEXT_24);
		ret = intel_write_sha_text(dev_priv, 0);
		if (ret < 0)
			return ret;
		sha_idx += sizeof(sha_text);
	} else {
		drm_dbg_kms(&dev_priv->drm, "Invalid number of leftovers %d\n",
			    sha_leftovers);
		return -EINVAL;
	}

	intel_de_write(dev_priv, HDCP_REP_CTL, rep_ctl | HDCP_SHA1_TEXT_32);
	/* Fill up to 64-4 bytes with zeros (leave the last write for length) */
	while ((sha_idx % 64) < (64 - sizeof(sha_text))) {
		ret = intel_write_sha_text(dev_priv, 0);
		if (ret < 0)
			return ret;
		sha_idx += sizeof(sha_text);
	}

	/*
	 * Last write gets the length of the concatenation in bits. That is:
	 *  - 5 bytes per device
	 *  - 10 bytes for BINFO/BSTATUS(2), M0(8)
	 */
	sha_text = (num_downstream * 5 + 10) * 8;
	ret = intel_write_sha_text(dev_priv, sha_text);
	if (ret < 0)
		return ret;

	/* Tell the HW we're done with the hash and wait for it to ACK */
	intel_de_write(dev_priv, HDCP_REP_CTL,
		       rep_ctl | HDCP_SHA1_COMPLETE_HASH);
	if (intel_de_wait_for_set(dev_priv, HDCP_REP_CTL,
				  HDCP_SHA1_COMPLETE, 1)) {
		drm_err(&dev_priv->drm, "Timed out waiting for SHA1 complete\n");
		return -ETIMEDOUT;
	}
	if (!(intel_de_read(dev_priv, HDCP_REP_CTL) & HDCP_SHA1_V_MATCH)) {
		drm_dbg_kms(&dev_priv->drm, "SHA-1 mismatch, HDCP failed\n");
		return -ENXIO;
	}

	return 0;
}
 618
/*
 * Implements Part 2 of the HDCP authorization procedure: wait for the
 * repeater's KSV list, validate its topology limits, check the KSVs
 * against the revocation list, and verify V' (with the spec-mandated
 * retries). Returns 0 on success, a negative errno otherwise.
 */
static
int intel_hdcp_auth_downstream(struct intel_connector *connector)
{
	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
	const struct intel_hdcp_shim *shim = connector->hdcp.shim;
	u8 bstatus[2], num_downstream, *ksv_fifo;
	int ret, i, tries = 3;

	ret = intel_hdcp_poll_ksv_fifo(dig_port, shim);
	if (ret) {
		drm_dbg_kms(&dev_priv->drm,
			    "KSV list failed to become ready (%d)\n", ret);
		return ret;
	}

	ret = shim->read_bstatus(dig_port, bstatus);
	if (ret)
		return ret;

	/* Reject topologies exceeding the HDCP 1.4 device/cascade limits. */
	if (DRM_HDCP_MAX_DEVICE_EXCEEDED(bstatus[0]) ||
	    DRM_HDCP_MAX_CASCADE_EXCEEDED(bstatus[1])) {
		drm_dbg_kms(&dev_priv->drm, "Max Topology Limit Exceeded\n");
		return -EPERM;
	}

	/*
	 * When repeater reports 0 device count, HDCP1.4 spec allows disabling
	 * the HDCP encryption. That implies that repeater can't have its own
	 * display. As there is no consumption of encrypted content in the
	 * repeater with 0 downstream devices, we are failing the
	 * authentication.
	 */
	num_downstream = DRM_HDCP_NUM_DOWNSTREAM(bstatus[0]);
	if (num_downstream == 0) {
		drm_dbg_kms(&dev_priv->drm,
			    "Repeater with zero downstream devices\n");
		return -EINVAL;
	}

	ksv_fifo = kcalloc(DRM_HDCP_KSV_LEN, num_downstream, GFP_KERNEL);
	if (!ksv_fifo) {
		drm_dbg_kms(&dev_priv->drm, "Out of mem: ksv_fifo\n");
		return -ENOMEM;
	}

	ret = shim->read_ksv_fifo(dig_port, num_downstream, ksv_fifo);
	if (ret)
		goto err;

	/* Any revoked KSV in the chain invalidates the whole topology. */
	if (drm_hdcp_check_ksvs_revoked(&dev_priv->drm, ksv_fifo,
					num_downstream) > 0) {
		drm_err(&dev_priv->drm, "Revoked Ksv(s) in ksv_fifo\n");
		ret = -EPERM;
		goto err;
	}

	/*
	 * When V prime mismatches, DP Spec mandates re-read of
	 * V prime atleast twice.
	 */
	for (i = 0; i < tries; i++) {
		ret = intel_hdcp_validate_v_prime(connector, shim,
						  ksv_fifo, num_downstream,
						  bstatus);
		if (!ret)
			break;
	}

	if (i == tries) {
		drm_dbg_kms(&dev_priv->drm,
			    "V Prime validation failed.(%d)\n", ret);
		goto err;
	}

	drm_dbg_kms(&dev_priv->drm, "HDCP is enabled (%d downstream devices)\n",
		    num_downstream);
	ret = 0;
err:
	kfree(ksv_fifo);
	return ret;
}
 702
/*
 * Implements Part 1 of the HDCP authorization procedure: generate and
 * exchange An/Aksv, read and validate Bksv, arm encryption, and verify
 * R0 == R0'. On a repeater, chains into Part 2 via
 * intel_hdcp_auth_downstream(). Returns 0 on success, negative errno
 * otherwise.
 */
static int intel_hdcp_auth(struct intel_connector *connector)
{
	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
	struct intel_hdcp *hdcp = &connector->hdcp;
	const struct intel_hdcp_shim *shim = hdcp->shim;
	enum transcoder cpu_transcoder = connector->hdcp.cpu_transcoder;
	enum port port = dig_port->base.port;
	unsigned long r0_prime_gen_start;
	int ret, i, tries = 2;
	/* Unions let the same bytes be viewed as registers or shim buffers. */
	union {
		u32 reg[2];
		u8 shim[DRM_HDCP_AN_LEN];
	} an;
	union {
		u32 reg[2];
		u8 shim[DRM_HDCP_KSV_LEN];
	} bksv;
	union {
		u32 reg;
		u8 shim[DRM_HDCP_RI_LEN];
	} ri;
	bool repeater_present, hdcp_capable;

	/*
	 * Detects whether the display is HDCP capable. Although we check for
	 * valid Bksv below, the HDCP over DP spec requires that we check
	 * whether the display supports HDCP before we write An. For HDMI
	 * displays, this is not necessary.
	 */
	if (shim->hdcp_capable) {
		ret = shim->hdcp_capable(dig_port, &hdcp_capable);
		if (ret)
			return ret;
		if (!hdcp_capable) {
			drm_dbg_kms(&dev_priv->drm,
				    "Panel is not HDCP capable\n");
			return -EINVAL;
		}
	}

	/* Initialize An with 2 random values and acquire it */
	for (i = 0; i < 2; i++)
		intel_de_write(dev_priv,
			       HDCP_ANINIT(dev_priv, cpu_transcoder, port),
			       get_random_u32());
	intel_de_write(dev_priv, HDCP_CONF(dev_priv, cpu_transcoder, port),
		       HDCP_CONF_CAPTURE_AN);

	/* Wait for An to be acquired */
	if (intel_de_wait_for_set(dev_priv,
				  HDCP_STATUS(dev_priv, cpu_transcoder, port),
				  HDCP_STATUS_AN_READY, 1)) {
		drm_err(&dev_priv->drm, "Timed out waiting for An\n");
		return -ETIMEDOUT;
	}

	an.reg[0] = intel_de_read(dev_priv,
				  HDCP_ANLO(dev_priv, cpu_transcoder, port));
	an.reg[1] = intel_de_read(dev_priv,
				  HDCP_ANHI(dev_priv, cpu_transcoder, port));
	ret = shim->write_an_aksv(dig_port, an.shim);
	if (ret)
		return ret;

	/* Timestamp the Aksv write; R0' wait below is relative to this. */
	r0_prime_gen_start = jiffies;

	memset(&bksv, 0, sizeof(bksv));

	ret = intel_hdcp_read_valid_bksv(dig_port, shim, bksv.shim);
	if (ret < 0)
		return ret;

	if (drm_hdcp_check_ksvs_revoked(&dev_priv->drm, bksv.shim, 1) > 0) {
		drm_err(&dev_priv->drm, "BKSV is revoked\n");
		return -EPERM;
	}

	intel_de_write(dev_priv, HDCP_BKSVLO(dev_priv, cpu_transcoder, port),
		       bksv.reg[0]);
	intel_de_write(dev_priv, HDCP_BKSVHI(dev_priv, cpu_transcoder, port),
		       bksv.reg[1]);

	ret = shim->repeater_present(dig_port, &repeater_present);
	if (ret)
		return ret;
	if (repeater_present)
		intel_de_write(dev_priv, HDCP_REP_CTL,
			       intel_hdcp_get_repeater_ctl(dev_priv, cpu_transcoder, port));

	ret = shim->toggle_signalling(dig_port, cpu_transcoder, true);
	if (ret)
		return ret;

	intel_de_write(dev_priv, HDCP_CONF(dev_priv, cpu_transcoder, port),
		       HDCP_CONF_AUTH_AND_ENC);

	/* Wait for R0 ready */
	if (wait_for(intel_de_read(dev_priv, HDCP_STATUS(dev_priv, cpu_transcoder, port)) &
		     (HDCP_STATUS_R0_READY | HDCP_STATUS_ENC), 1)) {
		drm_err(&dev_priv->drm, "Timed out waiting for R0 ready\n");
		return -ETIMEDOUT;
	}

	/*
	 * Wait for R0' to become available. The spec says 100ms from Aksv, but
	 * some monitors can take longer than this. We'll set the timeout at
	 * 300ms just to be sure.
	 *
	 * On DP, there's an R0_READY bit available but no such bit
	 * exists on HDMI. Since the upper-bound is the same, we'll just do
	 * the stupid thing instead of polling on one and not the other.
	 */
	wait_remaining_ms_from_jiffies(r0_prime_gen_start, 300);

	tries = 3;

	/*
	 * DP HDCP Spec mandates the two more reattempt to read R0, incase
	 * of R0 mismatch.
	 */
	for (i = 0; i < tries; i++) {
		ri.reg = 0;
		ret = shim->read_ri_prime(dig_port, ri.shim);
		if (ret)
			return ret;
		intel_de_write(dev_priv,
			       HDCP_RPRIME(dev_priv, cpu_transcoder, port),
			       ri.reg);

		/* Wait for Ri prime match */
		if (!wait_for(intel_de_read(dev_priv, HDCP_STATUS(dev_priv, cpu_transcoder, port)) &
			      (HDCP_STATUS_RI_MATCH | HDCP_STATUS_ENC), 1))
			break;
	}

	if (i == tries) {
		drm_dbg_kms(&dev_priv->drm,
			    "Timed out waiting for Ri prime match (%x)\n",
			    intel_de_read(dev_priv, HDCP_STATUS(dev_priv,
					  cpu_transcoder, port)));
		return -ETIMEDOUT;
	}

	/* Wait for encryption confirmation */
	if (intel_de_wait_for_set(dev_priv,
				  HDCP_STATUS(dev_priv, cpu_transcoder, port),
				  HDCP_STATUS_ENC,
				  HDCP_ENCRYPT_STATUS_CHANGE_TIMEOUT_MS)) {
		drm_err(&dev_priv->drm, "Timed out waiting for encryption\n");
		return -ETIMEDOUT;
	}

	/* DP MST Auth Part 1 Step 2.a and Step 2.b */
	if (shim->stream_encryption) {
		ret = shim->stream_encryption(connector, true);
		if (ret) {
			drm_err(&dev_priv->drm, "[%s:%d] Failed to enable HDCP 1.4 stream enc\n",
				connector->base.name, connector->base.base.id);
			return ret;
		}
		drm_dbg_kms(&dev_priv->drm, "HDCP 1.4 transcoder: %s stream encrypted\n",
			    transcoder_name(hdcp->stream_transcoder));
	}

	if (repeater_present)
		return intel_hdcp_auth_downstream(connector);

	drm_dbg_kms(&dev_priv->drm, "HDCP is enabled (no repeater present)\n");
	return 0;
}
 875
/*
 * Tear down HDCP 1.4 on this connector: disable stream encryption (MST),
 * then port encryption, repeater bits and signalling. Returns 0 on
 * success or a negative errno.
 */
static int _intel_hdcp_disable(struct intel_connector *connector)
{
	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
	struct intel_hdcp *hdcp = &connector->hdcp;
	enum port port = dig_port->base.port;
	enum transcoder cpu_transcoder = hdcp->cpu_transcoder;
	u32 repeater_ctl;
	int ret;

	drm_dbg_kms(&dev_priv->drm, "[%s:%d] HDCP is being disabled...\n",
		    connector->base.name, connector->base.base.id);

	if (hdcp->shim->stream_encryption) {
		ret = hdcp->shim->stream_encryption(connector, false);
		if (ret) {
			drm_err(&dev_priv->drm, "[%s:%d] Failed to disable HDCP 1.4 stream enc\n",
				connector->base.name, connector->base.base.id);
			return ret;
		}
		drm_dbg_kms(&dev_priv->drm, "HDCP 1.4 transcoder: %s stream encryption disabled\n",
			    transcoder_name(hdcp->stream_transcoder));
		/*
		 * If there are other connectors on this port using HDCP,
		 * don't disable it until it disabled HDCP encryption for
		 * all connectors in MST topology.
		 */
		if (dig_port->num_hdcp_streams > 0)
			return 0;
	}

	hdcp->hdcp_encrypted = false;
	intel_de_write(dev_priv, HDCP_CONF(dev_priv, cpu_transcoder, port), 0);
	/* Wait for the whole status register to clear, not just ENC. */
	if (intel_de_wait_for_clear(dev_priv,
				    HDCP_STATUS(dev_priv, cpu_transcoder, port),
				    ~0, HDCP_ENCRYPT_STATUS_CHANGE_TIMEOUT_MS)) {
		drm_err(&dev_priv->drm,
			"Failed to disable HDCP, timeout clearing status\n");
		return -ETIMEDOUT;
	}

	/* Clear only this port/transcoder's bits from the repeater control. */
	repeater_ctl = intel_hdcp_get_repeater_ctl(dev_priv, cpu_transcoder,
						   port);
	intel_de_write(dev_priv, HDCP_REP_CTL,
		       intel_de_read(dev_priv, HDCP_REP_CTL) & ~repeater_ctl);

	ret = hdcp->shim->toggle_signalling(dig_port, cpu_transcoder, false);
	if (ret) {
		drm_err(&dev_priv->drm, "Failed to disable HDCP signalling\n");
		return ret;
	}

	drm_dbg_kms(&dev_priv->drm, "HDCP is disabled\n");
	return 0;
}
 931
/*
 * Bring up HDCP 1.4 on this connector: load the keys (with retries) and
 * run authentication, re-attempting up to three times as the spec expects
 * re-auth on failure. Returns 0 with hdcp_encrypted set on success.
 */
static int _intel_hdcp_enable(struct intel_connector *connector)
{
	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
	struct intel_hdcp *hdcp = &connector->hdcp;
	int i, ret, tries = 3;

	drm_dbg_kms(&dev_priv->drm, "[%s:%d] HDCP is being enabled...\n",
		    connector->base.name, connector->base.base.id);

	if (!hdcp_key_loadable(dev_priv)) {
		drm_err(&dev_priv->drm, "HDCP key Load is not possible\n");
		return -ENXIO;
	}

	/* Clear keys between attempts so each load starts clean. */
	for (i = 0; i < KEY_LOAD_TRIES; i++) {
		ret = intel_hdcp_load_keys(dev_priv);
		if (!ret)
			break;
		intel_hdcp_clear_keys(dev_priv);
	}
	if (ret) {
		drm_err(&dev_priv->drm, "Could not load HDCP keys, (%d)\n",
			ret);
		return ret;
	}

	/* Incase of authentication failures, HDCP spec expects reauth. */
	for (i = 0; i < tries; i++) {
		ret = intel_hdcp_auth(connector);
		if (!ret) {
			hdcp->hdcp_encrypted = true;
			return 0;
		}

		drm_dbg_kms(&dev_priv->drm, "HDCP Auth failure (%d)\n", ret);

		/* Ensuring HDCP encryption and signalling are stopped. */
		_intel_hdcp_disable(connector);
	}

	drm_dbg_kms(&dev_priv->drm,
		    "HDCP authentication failed (%d tries/%d)\n", tries, ret);
	return ret;
}
 976
/* Map an embedded struct intel_hdcp back to its owning connector. */
static struct intel_connector *intel_hdcp_to_connector(struct intel_hdcp *hdcp)
{
	return container_of(hdcp, struct intel_connector, hdcp);
}
 981
/*
 * intel_hdcp_update_value - update the content protection state of a
 * connector, keeping the per-port HDCP stream count in sync.
 *
 * Must be called with hdcp->mutex held; when the value actually changes,
 * dig_port->hdcp_mutex must be held as well. If @update_property is true,
 * a reference on the connector is taken and prop_work is scheduled to
 * propagate the new value to the userspace-visible property (the worker
 * drops the reference).
 */
static void intel_hdcp_update_value(struct intel_connector *connector,
				    u64 value, bool update_property)
{
	struct drm_device *dev = connector->base.dev;
	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
	struct intel_hdcp *hdcp = &connector->hdcp;

	drm_WARN_ON(connector->base.dev, !mutex_is_locked(&hdcp->mutex));

	if (hdcp->value == value)
		return;

	drm_WARN_ON(dev, !mutex_is_locked(&dig_port->hdcp_mutex));

	/* Track how many streams on this port are currently protected. */
	if (hdcp->value == DRM_MODE_CONTENT_PROTECTION_ENABLED) {
		if (!drm_WARN_ON(dev, dig_port->num_hdcp_streams == 0))
			dig_port->num_hdcp_streams--;
	} else if (value == DRM_MODE_CONTENT_PROTECTION_ENABLED) {
		dig_port->num_hdcp_streams++;
	}

	hdcp->value = value;
	if (update_property) {
		/* Reference is released by intel_hdcp_prop_work(). */
		drm_connector_get(&connector->base);
		schedule_work(&hdcp->prop_work);
	}
}
1009
/*
 * Implements Part 3 of the HDCP authorization procedure: periodic link
 * integrity check for HDCP 1.4. If the shim reports the link as broken,
 * encryption is torn down and authentication is retried; on any failure
 * the content protection property is demoted to DESIRED so userspace can
 * react.
 *
 * Returns 0 when the link is fine or was successfully re-authenticated,
 * -EINVAL when HDCP 1.4 is not currently enabled, -ENXIO when hardware
 * dropped encryption unexpectedly, or the disable/enable error.
 */
static int intel_hdcp_check_link(struct intel_connector *connector)
{
	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
	struct intel_hdcp *hdcp = &connector->hdcp;
	enum port port = dig_port->base.port;
	enum transcoder cpu_transcoder;
	int ret = 0;

	mutex_lock(&hdcp->mutex);
	mutex_lock(&dig_port->hdcp_mutex);

	cpu_transcoder = hdcp->cpu_transcoder;

	/* Check_link valid only when HDCP1.4 is enabled */
	if (hdcp->value != DRM_MODE_CONTENT_PROTECTION_ENABLED ||
	    !hdcp->hdcp_encrypted) {
		ret = -EINVAL;
		goto out;
	}

	/* Hardware must still report encryption active for this transcoder. */
	if (drm_WARN_ON(&dev_priv->drm,
			!intel_hdcp_in_use(dev_priv, cpu_transcoder, port))) {
		drm_err(&dev_priv->drm,
			"%s:%d HDCP link stopped encryption,%x\n",
			connector->base.name, connector->base.base.id,
			intel_de_read(dev_priv, HDCP_STATUS(dev_priv, cpu_transcoder, port)));
		ret = -ENXIO;
		intel_hdcp_update_value(connector,
					DRM_MODE_CONTENT_PROTECTION_DESIRED,
					true);
		goto out;
	}

	/* Sink-side link check; nonzero means the link is still good. */
	if (hdcp->shim->check_link(dig_port, connector)) {
		if (hdcp->value != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) {
			intel_hdcp_update_value(connector,
				DRM_MODE_CONTENT_PROTECTION_ENABLED, true);
		}
		goto out;
	}

	drm_dbg_kms(&dev_priv->drm,
		    "[%s:%d] HDCP link failed, retrying authentication\n",
		    connector->base.name, connector->base.base.id);

	ret = _intel_hdcp_disable(connector);
	if (ret) {
		drm_err(&dev_priv->drm, "Failed to disable hdcp (%d)\n", ret);
		intel_hdcp_update_value(connector,
					DRM_MODE_CONTENT_PROTECTION_DESIRED,
					true);
		goto out;
	}

	ret = _intel_hdcp_enable(connector);
	if (ret) {
		drm_err(&dev_priv->drm, "Failed to enable hdcp (%d)\n", ret);
		intel_hdcp_update_value(connector,
					DRM_MODE_CONTENT_PROTECTION_DESIRED,
					true);
		goto out;
	}

out:
	mutex_unlock(&dig_port->hdcp_mutex);
	mutex_unlock(&hdcp->mutex);
	return ret;
}
1080
/*
 * Deferred worker that pushes hdcp->value to the connector's content
 * protection property. Runs outside the callers' hdcp->mutex critical
 * sections because updating the property needs connection_mutex, which
 * must be taken before hdcp->mutex. Drops the connector reference taken
 * by intel_hdcp_update_value().
 */
static void intel_hdcp_prop_work(struct work_struct *work)
{
	struct intel_hdcp *hdcp = container_of(work, struct intel_hdcp,
					       prop_work);
	struct intel_connector *connector = intel_hdcp_to_connector(hdcp);
	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);

	drm_modeset_lock(&dev_priv->drm.mode_config.connection_mutex, NULL);
	mutex_lock(&hdcp->mutex);

	/*
	 * This worker is only used to flip between ENABLED/DESIRED. Either of
	 * those to UNDESIRED is handled by core. If value == UNDESIRED,
	 * we're running just after hdcp has been disabled, so just exit
	 */
	if (hdcp->value != DRM_MODE_CONTENT_PROTECTION_UNDESIRED)
		drm_hdcp_update_content_protection(&connector->base,
						   hdcp->value);

	mutex_unlock(&hdcp->mutex);
	drm_modeset_unlock(&dev_priv->drm.mode_config.connection_mutex);

	drm_connector_put(&connector->base);
}
1105
1106bool is_hdcp_supported(struct drm_i915_private *dev_priv, enum port port)
1107{
1108	return INTEL_INFO(dev_priv)->display.has_hdcp &&
1109			(DISPLAY_VER(dev_priv) >= 12 || port < PORT_E);
1110}
1111
1112static int
1113hdcp2_prepare_ake_init(struct intel_connector *connector,
1114		       struct hdcp2_ake_init *ake_data)
1115{
1116	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
1117	struct hdcp_port_data *data = &dig_port->hdcp_port_data;
1118	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
1119	struct i915_hdcp_comp_master *comp;
1120	int ret;
1121
1122	mutex_lock(&dev_priv->hdcp_comp_mutex);
1123	comp = dev_priv->hdcp_master;
1124
1125	if (!comp || !comp->ops) {
1126		mutex_unlock(&dev_priv->hdcp_comp_mutex);
1127		return -EINVAL;
1128	}
1129
1130	ret = comp->ops->initiate_hdcp2_session(comp->mei_dev, data, ake_data);
1131	if (ret)
1132		drm_dbg_kms(&dev_priv->drm, "Prepare_ake_init failed. %d\n",
1133			    ret);
1134	mutex_unlock(&dev_priv->hdcp_comp_mutex);
1135
1136	return ret;
1137}
1138
1139static int
1140hdcp2_verify_rx_cert_prepare_km(struct intel_connector *connector,
1141				struct hdcp2_ake_send_cert *rx_cert,
1142				bool *paired,
1143				struct hdcp2_ake_no_stored_km *ek_pub_km,
1144				size_t *msg_sz)
1145{
1146	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
1147	struct hdcp_port_data *data = &dig_port->hdcp_port_data;
1148	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
1149	struct i915_hdcp_comp_master *comp;
1150	int ret;
1151
1152	mutex_lock(&dev_priv->hdcp_comp_mutex);
1153	comp = dev_priv->hdcp_master;
1154
1155	if (!comp || !comp->ops) {
1156		mutex_unlock(&dev_priv->hdcp_comp_mutex);
1157		return -EINVAL;
1158	}
1159
1160	ret = comp->ops->verify_receiver_cert_prepare_km(comp->mei_dev, data,
1161							 rx_cert, paired,
1162							 ek_pub_km, msg_sz);
1163	if (ret < 0)
1164		drm_dbg_kms(&dev_priv->drm, "Verify rx_cert failed. %d\n",
1165			    ret);
1166	mutex_unlock(&dev_priv->hdcp_comp_mutex);
1167
1168	return ret;
1169}
1170
1171static int hdcp2_verify_hprime(struct intel_connector *connector,
1172			       struct hdcp2_ake_send_hprime *rx_hprime)
1173{
1174	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
1175	struct hdcp_port_data *data = &dig_port->hdcp_port_data;
1176	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
1177	struct i915_hdcp_comp_master *comp;
1178	int ret;
1179
1180	mutex_lock(&dev_priv->hdcp_comp_mutex);
1181	comp = dev_priv->hdcp_master;
1182
1183	if (!comp || !comp->ops) {
1184		mutex_unlock(&dev_priv->hdcp_comp_mutex);
1185		return -EINVAL;
1186	}
1187
1188	ret = comp->ops->verify_hprime(comp->mei_dev, data, rx_hprime);
1189	if (ret < 0)
1190		drm_dbg_kms(&dev_priv->drm, "Verify hprime failed. %d\n", ret);
1191	mutex_unlock(&dev_priv->hdcp_comp_mutex);
1192
1193	return ret;
1194}
1195
1196static int
1197hdcp2_store_pairing_info(struct intel_connector *connector,
1198			 struct hdcp2_ake_send_pairing_info *pairing_info)
1199{
1200	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
1201	struct hdcp_port_data *data = &dig_port->hdcp_port_data;
1202	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
1203	struct i915_hdcp_comp_master *comp;
1204	int ret;
1205
1206	mutex_lock(&dev_priv->hdcp_comp_mutex);
1207	comp = dev_priv->hdcp_master;
1208
1209	if (!comp || !comp->ops) {
1210		mutex_unlock(&dev_priv->hdcp_comp_mutex);
1211		return -EINVAL;
1212	}
1213
1214	ret = comp->ops->store_pairing_info(comp->mei_dev, data, pairing_info);
1215	if (ret < 0)
1216		drm_dbg_kms(&dev_priv->drm, "Store pairing info failed. %d\n",
1217			    ret);
1218	mutex_unlock(&dev_priv->hdcp_comp_mutex);
1219
1220	return ret;
1221}
1222
1223static int
1224hdcp2_prepare_lc_init(struct intel_connector *connector,
1225		      struct hdcp2_lc_init *lc_init)
1226{
1227	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
1228	struct hdcp_port_data *data = &dig_port->hdcp_port_data;
1229	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
1230	struct i915_hdcp_comp_master *comp;
1231	int ret;
1232
1233	mutex_lock(&dev_priv->hdcp_comp_mutex);
1234	comp = dev_priv->hdcp_master;
1235
1236	if (!comp || !comp->ops) {
1237		mutex_unlock(&dev_priv->hdcp_comp_mutex);
1238		return -EINVAL;
1239	}
1240
1241	ret = comp->ops->initiate_locality_check(comp->mei_dev, data, lc_init);
1242	if (ret < 0)
1243		drm_dbg_kms(&dev_priv->drm, "Prepare lc_init failed. %d\n",
1244			    ret);
1245	mutex_unlock(&dev_priv->hdcp_comp_mutex);
1246
1247	return ret;
1248}
1249
1250static int
1251hdcp2_verify_lprime(struct intel_connector *connector,
1252		    struct hdcp2_lc_send_lprime *rx_lprime)
1253{
1254	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
1255	struct hdcp_port_data *data = &dig_port->hdcp_port_data;
1256	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
1257	struct i915_hdcp_comp_master *comp;
1258	int ret;
1259
1260	mutex_lock(&dev_priv->hdcp_comp_mutex);
1261	comp = dev_priv->hdcp_master;
1262
1263	if (!comp || !comp->ops) {
1264		mutex_unlock(&dev_priv->hdcp_comp_mutex);
1265		return -EINVAL;
1266	}
1267
1268	ret = comp->ops->verify_lprime(comp->mei_dev, data, rx_lprime);
1269	if (ret < 0)
1270		drm_dbg_kms(&dev_priv->drm, "Verify L_Prime failed. %d\n",
1271			    ret);
1272	mutex_unlock(&dev_priv->hdcp_comp_mutex);
1273
1274	return ret;
1275}
1276
1277static int hdcp2_prepare_skey(struct intel_connector *connector,
1278			      struct hdcp2_ske_send_eks *ske_data)
1279{
1280	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
1281	struct hdcp_port_data *data = &dig_port->hdcp_port_data;
1282	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
1283	struct i915_hdcp_comp_master *comp;
1284	int ret;
1285
1286	mutex_lock(&dev_priv->hdcp_comp_mutex);
1287	comp = dev_priv->hdcp_master;
1288
1289	if (!comp || !comp->ops) {
1290		mutex_unlock(&dev_priv->hdcp_comp_mutex);
1291		return -EINVAL;
1292	}
1293
1294	ret = comp->ops->get_session_key(comp->mei_dev, data, ske_data);
1295	if (ret < 0)
1296		drm_dbg_kms(&dev_priv->drm, "Get session key failed. %d\n",
1297			    ret);
1298	mutex_unlock(&dev_priv->hdcp_comp_mutex);
1299
1300	return ret;
1301}
1302
1303static int
1304hdcp2_verify_rep_topology_prepare_ack(struct intel_connector *connector,
1305				      struct hdcp2_rep_send_receiverid_list
1306								*rep_topology,
1307				      struct hdcp2_rep_send_ack *rep_send_ack)
1308{
1309	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
1310	struct hdcp_port_data *data = &dig_port->hdcp_port_data;
1311	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
1312	struct i915_hdcp_comp_master *comp;
1313	int ret;
1314
1315	mutex_lock(&dev_priv->hdcp_comp_mutex);
1316	comp = dev_priv->hdcp_master;
1317
1318	if (!comp || !comp->ops) {
1319		mutex_unlock(&dev_priv->hdcp_comp_mutex);
1320		return -EINVAL;
1321	}
1322
1323	ret = comp->ops->repeater_check_flow_prepare_ack(comp->mei_dev, data,
1324							 rep_topology,
1325							 rep_send_ack);
1326	if (ret < 0)
1327		drm_dbg_kms(&dev_priv->drm,
1328			    "Verify rep topology failed. %d\n", ret);
1329	mutex_unlock(&dev_priv->hdcp_comp_mutex);
1330
1331	return ret;
1332}
1333
1334static int
1335hdcp2_verify_mprime(struct intel_connector *connector,
1336		    struct hdcp2_rep_stream_ready *stream_ready)
1337{
1338	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
1339	struct hdcp_port_data *data = &dig_port->hdcp_port_data;
1340	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
1341	struct i915_hdcp_comp_master *comp;
1342	int ret;
1343
1344	mutex_lock(&dev_priv->hdcp_comp_mutex);
1345	comp = dev_priv->hdcp_master;
1346
1347	if (!comp || !comp->ops) {
1348		mutex_unlock(&dev_priv->hdcp_comp_mutex);
1349		return -EINVAL;
1350	}
1351
1352	ret = comp->ops->verify_mprime(comp->mei_dev, data, stream_ready);
1353	if (ret < 0)
1354		drm_dbg_kms(&dev_priv->drm, "Verify mprime failed. %d\n", ret);
1355	mutex_unlock(&dev_priv->hdcp_comp_mutex);
1356
1357	return ret;
1358}
1359
1360static int hdcp2_authenticate_port(struct intel_connector *connector)
1361{
1362	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
1363	struct hdcp_port_data *data = &dig_port->hdcp_port_data;
1364	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
1365	struct i915_hdcp_comp_master *comp;
1366	int ret;
1367
1368	mutex_lock(&dev_priv->hdcp_comp_mutex);
1369	comp = dev_priv->hdcp_master;
1370
1371	if (!comp || !comp->ops) {
1372		mutex_unlock(&dev_priv->hdcp_comp_mutex);
1373		return -EINVAL;
1374	}
1375
1376	ret = comp->ops->enable_hdcp_authentication(comp->mei_dev, data);
1377	if (ret < 0)
1378		drm_dbg_kms(&dev_priv->drm, "Enable hdcp auth failed. %d\n",
1379			    ret);
1380	mutex_unlock(&dev_priv->hdcp_comp_mutex);
1381
1382	return ret;
1383}
1384
1385static int hdcp2_close_mei_session(struct intel_connector *connector)
1386{
1387	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
1388	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
1389	struct i915_hdcp_comp_master *comp;
1390	int ret;
1391
1392	mutex_lock(&dev_priv->hdcp_comp_mutex);
1393	comp = dev_priv->hdcp_master;
1394
1395	if (!comp || !comp->ops) {
1396		mutex_unlock(&dev_priv->hdcp_comp_mutex);
1397		return -EINVAL;
1398	}
1399
1400	ret = comp->ops->close_hdcp_session(comp->mei_dev,
1401					     &dig_port->hdcp_port_data);
1402	mutex_unlock(&dev_priv->hdcp_comp_mutex);
1403
1404	return ret;
1405}
1406
/* Deauthenticating the port amounts to closing the firmware session. */
static int hdcp2_deauthenticate_port(struct intel_connector *connector)
{
	return hdcp2_close_mei_session(connector);
}
1411
/*
 * Authentication flow starts from here.
 *
 * HDCP 2.2 AKE stage: exchange AKE_Init/AKE_Send_Cert with the sink,
 * verify the receiver certificate (and check it against the revocation
 * list), send km, verify H', and perform pairing when the receiver is
 * not yet paired. Also resets seq_num_v/seq_num_m for the new session.
 *
 * Returns 0 on success, -EINVAL for a non-HDCP2.2 sink, -EPERM for a
 * revoked receiver ID, or the shim/firmware error.
 */
static int hdcp2_authentication_key_exchange(struct intel_connector *connector)
{
	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
	struct intel_hdcp *hdcp = &connector->hdcp;
	union {
		struct hdcp2_ake_init ake_init;
		struct hdcp2_ake_send_cert send_cert;
		struct hdcp2_ake_no_stored_km no_stored_km;
		struct hdcp2_ake_send_hprime send_hprime;
		struct hdcp2_ake_send_pairing_info pairing_info;
	} msgs;
	const struct intel_hdcp_shim *shim = hdcp->shim;
	size_t size;
	int ret;

	/* Init for seq_num */
	hdcp->seq_num_v = 0;
	hdcp->seq_num_m = 0;

	ret = hdcp2_prepare_ake_init(connector, &msgs.ake_init);
	if (ret < 0)
		return ret;

	ret = shim->write_2_2_msg(dig_port, &msgs.ake_init,
				  sizeof(msgs.ake_init));
	if (ret < 0)
		return ret;

	ret = shim->read_2_2_msg(dig_port, HDCP_2_2_AKE_SEND_CERT,
				 &msgs.send_cert, sizeof(msgs.send_cert));
	if (ret < 0)
		return ret;

	if (msgs.send_cert.rx_caps[0] != HDCP_2_2_RX_CAPS_VERSION_VAL) {
		drm_dbg_kms(&dev_priv->drm, "cert.rx_caps dont claim HDCP2.2\n");
		return -EINVAL;
	}

	hdcp->is_repeater = HDCP_2_2_RX_REPEATER(msgs.send_cert.rx_caps[2]);

	/* Reject sinks whose receiver ID is on the revocation list. */
	if (drm_hdcp_check_ksvs_revoked(&dev_priv->drm,
					msgs.send_cert.cert_rx.receiver_id,
					1) > 0) {
		drm_err(&dev_priv->drm, "Receiver ID is revoked\n");
		return -EPERM;
	}

	/*
	 * Here msgs.no_stored_km will hold msgs corresponding to the km
	 * stored also.
	 */
	ret = hdcp2_verify_rx_cert_prepare_km(connector, &msgs.send_cert,
					      &hdcp->is_paired,
					      &msgs.no_stored_km, &size);
	if (ret < 0)
		return ret;

	ret = shim->write_2_2_msg(dig_port, &msgs.no_stored_km, size);
	if (ret < 0)
		return ret;

	ret = shim->read_2_2_msg(dig_port, HDCP_2_2_AKE_SEND_HPRIME,
				 &msgs.send_hprime, sizeof(msgs.send_hprime));
	if (ret < 0)
		return ret;

	ret = hdcp2_verify_hprime(connector, &msgs.send_hprime);
	if (ret < 0)
		return ret;

	if (!hdcp->is_paired) {
		/* Pairing is required */
		ret = shim->read_2_2_msg(dig_port,
					 HDCP_2_2_AKE_SEND_PAIRING_INFO,
					 &msgs.pairing_info,
					 sizeof(msgs.pairing_info));
		if (ret < 0)
			return ret;

		ret = hdcp2_store_pairing_info(connector, &msgs.pairing_info);
		if (ret < 0)
			return ret;
		hdcp->is_paired = true;
	}

	return 0;
}
1501
/*
 * HDCP 2.2 locality check: send LC_Init, read LC_Send_L_prime and have
 * the firmware verify L'. Each of the steps may fail transiently, so the
 * whole exchange is retried up to HDCP2_LC_RETRY_CNT times.
 *
 * Returns 0 on success or the error from the last attempted step.
 */
static int hdcp2_locality_check(struct intel_connector *connector)
{
	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
	struct intel_hdcp *hdcp = &connector->hdcp;
	union {
		struct hdcp2_lc_init lc_init;
		struct hdcp2_lc_send_lprime send_lprime;
	} msgs;
	const struct intel_hdcp_shim *shim = hdcp->shim;
	int tries = HDCP2_LC_RETRY_CNT, ret, i;

	for (i = 0; i < tries; i++) {
		ret = hdcp2_prepare_lc_init(connector, &msgs.lc_init);
		if (ret < 0)
			continue;

		ret = shim->write_2_2_msg(dig_port, &msgs.lc_init,
				      sizeof(msgs.lc_init));
		if (ret < 0)
			continue;

		ret = shim->read_2_2_msg(dig_port,
					 HDCP_2_2_LC_SEND_LPRIME,
					 &msgs.send_lprime,
					 sizeof(msgs.send_lprime));
		if (ret < 0)
			continue;

		ret = hdcp2_verify_lprime(connector, &msgs.send_lprime);
		if (!ret)
			break;
	}

	return ret;
}
1537
1538static int hdcp2_session_key_exchange(struct intel_connector *connector)
1539{
1540	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
1541	struct intel_hdcp *hdcp = &connector->hdcp;
1542	struct hdcp2_ske_send_eks send_eks;
1543	int ret;
1544
1545	ret = hdcp2_prepare_skey(connector, &send_eks);
1546	if (ret < 0)
1547		return ret;
1548
1549	ret = hdcp->shim->write_2_2_msg(dig_port, &send_eks,
1550					sizeof(send_eks));
1551	if (ret < 0)
1552		return ret;
1553
1554	return 0;
1555}
1556
/*
 * Single attempt at the RepeaterAuth_Stream_Manage / Stream_Ready
 * exchange: build the stream-manage message from hdcp_port_data, send it
 * (trimmed to the actual stream count), read Stream_Ready back and have
 * the firmware verify M'. seq_num_m is incremented on every attempt,
 * success or failure, as the spec requires a fresh value per message.
 *
 * Returns 0 on success, -ERANGE when seq_num_m has rolled over, or the
 * shim/firmware error.
 */
static
int _hdcp2_propagate_stream_management_info(struct intel_connector *connector)
{
	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
	struct hdcp_port_data *data = &dig_port->hdcp_port_data;
	struct intel_hdcp *hdcp = &connector->hdcp;
	union {
		struct hdcp2_rep_stream_manage stream_manage;
		struct hdcp2_rep_stream_ready stream_ready;
	} msgs;
	const struct intel_hdcp_shim *shim = hdcp->shim;
	int ret, streams_size_delta, i;

	if (connector->hdcp.seq_num_m > HDCP_2_2_SEQ_NUM_MAX)
		return -ERANGE;

	/* Prepare RepeaterAuth_Stream_Manage msg */
	msgs.stream_manage.msg_id = HDCP_2_2_REP_STREAM_MANAGE;
	drm_hdcp_cpu_to_be24(msgs.stream_manage.seq_num_m, hdcp->seq_num_m);

	msgs.stream_manage.k = cpu_to_be16(data->k);

	for (i = 0; i < data->k; i++) {
		msgs.stream_manage.streams[i].stream_id = data->streams[i].stream_id;
		msgs.stream_manage.streams[i].stream_type = data->streams[i].stream_type;
	}

	/* Only data->k streams are valid; don't send the unused tail. */
	streams_size_delta = (HDCP_2_2_MAX_CONTENT_STREAMS_CNT - data->k) *
				sizeof(struct hdcp2_streamid_type);
	/* Send it to Repeater */
	ret = shim->write_2_2_msg(dig_port, &msgs.stream_manage,
				  sizeof(msgs.stream_manage) - streams_size_delta);
	if (ret < 0)
		goto out;

	ret = shim->read_2_2_msg(dig_port, HDCP_2_2_REP_STREAM_READY,
				 &msgs.stream_ready, sizeof(msgs.stream_ready));
	if (ret < 0)
		goto out;

	data->seq_num_m = hdcp->seq_num_m;

	ret = hdcp2_verify_mprime(connector, &msgs.stream_ready);

out:
	/* seq_num_m must advance even on failure. */
	hdcp->seq_num_m++;

	return ret;
}
1606
/*
 * Repeater authentication: read the ReceiverID_List, sanity-check the
 * topology limits and seq_num_v monotonicity, reject revoked receiver
 * IDs, have the firmware verify the list and produce the ack, then send
 * RepeaterAuth_Send_Ack back to the repeater.
 *
 * Returns 0 on success, -EINVAL on topology/seq_num_v violations, -EPERM
 * when a revoked receiver ID is present, or the shim/firmware error.
 */
static
int hdcp2_authenticate_repeater_topology(struct intel_connector *connector)
{
	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
	struct intel_hdcp *hdcp = &connector->hdcp;
	union {
		struct hdcp2_rep_send_receiverid_list recvid_list;
		struct hdcp2_rep_send_ack rep_ack;
	} msgs;
	const struct intel_hdcp_shim *shim = hdcp->shim;
	u32 seq_num_v, device_cnt;
	u8 *rx_info;
	int ret;

	ret = shim->read_2_2_msg(dig_port, HDCP_2_2_REP_SEND_RECVID_LIST,
				 &msgs.recvid_list, sizeof(msgs.recvid_list));
	if (ret < 0)
		return ret;

	rx_info = msgs.recvid_list.rx_info;

	if (HDCP_2_2_MAX_CASCADE_EXCEEDED(rx_info[1]) ||
	    HDCP_2_2_MAX_DEVS_EXCEEDED(rx_info[1])) {
		drm_dbg_kms(&dev_priv->drm, "Topology Max Size Exceeded\n");
		return -EINVAL;
	}

	/* Converting and Storing the seq_num_v to local variable as DWORD */
	seq_num_v =
		drm_hdcp_be24_to_cpu((const u8 *)msgs.recvid_list.seq_num_v);

	/* The very first list of a session must carry seq_num_v == 0. */
	if (!hdcp->hdcp2_encrypted && seq_num_v) {
		drm_dbg_kms(&dev_priv->drm,
			    "Non zero Seq_num_v at first RecvId_List msg\n");
		return -EINVAL;
	}

	if (seq_num_v < hdcp->seq_num_v) {
		/* Roll over of the seq_num_v from repeater. Reauthenticate. */
		drm_dbg_kms(&dev_priv->drm, "Seq_num_v roll over.\n");
		return -EINVAL;
	}

	device_cnt = (HDCP_2_2_DEV_COUNT_HI(rx_info[0]) << 4 |
		      HDCP_2_2_DEV_COUNT_LO(rx_info[1]));
	if (drm_hdcp_check_ksvs_revoked(&dev_priv->drm,
					msgs.recvid_list.receiver_ids,
					device_cnt) > 0) {
		drm_err(&dev_priv->drm, "Revoked receiver ID(s) is in list\n");
		return -EPERM;
	}

	ret = hdcp2_verify_rep_topology_prepare_ack(connector,
						    &msgs.recvid_list,
						    &msgs.rep_ack);
	if (ret < 0)
		return ret;

	hdcp->seq_num_v = seq_num_v;
	ret = shim->write_2_2_msg(dig_port, &msgs.rep_ack,
				  sizeof(msgs.rep_ack));
	if (ret < 0)
		return ret;

	return 0;
}
1674
/*
 * Run the full HDCP 2.2 authentication sequence against the sink:
 * AKE, locality check, SKE, optional stream-type configuration, and
 * (for repeaters) topology authentication.
 *
 * Returns 0 on success or the error of the first failing stage.
 */
static int hdcp2_authenticate_sink(struct intel_connector *connector)
{
	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
	struct drm_i915_private *i915 = to_i915(connector->base.dev);
	struct intel_hdcp *hdcp = &connector->hdcp;
	const struct intel_hdcp_shim *shim = hdcp->shim;
	int ret;

	ret = hdcp2_authentication_key_exchange(connector);
	if (ret < 0) {
		drm_dbg_kms(&i915->drm, "AKE Failed. Err : %d\n", ret);
		return ret;
	}

	ret = hdcp2_locality_check(connector);
	if (ret < 0) {
		drm_dbg_kms(&i915->drm,
			    "Locality Check failed. Err : %d\n", ret);
		return ret;
	}

	ret = hdcp2_session_key_exchange(connector);
	if (ret < 0) {
		drm_dbg_kms(&i915->drm, "SKE Failed. Err : %d\n", ret);
		return ret;
	}

	/* Optional shim hook: advertise the stream type to the sink. */
	if (shim->config_stream_type) {
		ret = shim->config_stream_type(dig_port,
					       hdcp->is_repeater,
					       hdcp->content_type);
		if (ret < 0)
			return ret;
	}

	if (hdcp->is_repeater) {
		ret = hdcp2_authenticate_repeater_topology(connector);
		if (ret < 0) {
			drm_dbg_kms(&i915->drm,
				    "Repeater Auth Failed. Err: %d\n", ret);
			return ret;
		}
	}

	return ret;
}
1721
/*
 * Enable per-stream HDCP 2.2 encryption once the link itself reports
 * encrypted. If the link is unexpectedly not encrypted, the port is
 * deauthenticated and its auth state reset so a later attempt starts
 * from scratch.
 *
 * Returns 0 on success, -EPERM when the link is not encrypted, or the
 * shim's stream-encryption error.
 */
static int hdcp2_enable_stream_encryption(struct intel_connector *connector)
{
	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
	struct hdcp_port_data *data = &dig_port->hdcp_port_data;
	struct intel_hdcp *hdcp = &connector->hdcp;
	enum transcoder cpu_transcoder = hdcp->cpu_transcoder;
	enum port port = dig_port->base.port;
	int ret = 0;

	if (!(intel_de_read(dev_priv, HDCP2_STATUS(dev_priv, cpu_transcoder, port)) &
			    LINK_ENCRYPTION_STATUS)) {
		drm_err(&dev_priv->drm, "[%s:%d] HDCP 2.2 Link is not encrypted\n",
			connector->base.name, connector->base.base.id);
		ret = -EPERM;
		goto link_recover;
	}

	if (hdcp->shim->stream_2_2_encryption) {
		ret = hdcp->shim->stream_2_2_encryption(connector, true);
		if (ret) {
			drm_err(&dev_priv->drm, "[%s:%d] Failed to enable HDCP 2.2 stream enc\n",
				connector->base.name, connector->base.base.id);
			return ret;
		}
		drm_dbg_kms(&dev_priv->drm, "HDCP 2.2 transcoder: %s stream encrypted\n",
			    transcoder_name(hdcp->stream_transcoder));
	}

	return 0;

link_recover:
	if (hdcp2_deauthenticate_port(connector) < 0)
		drm_dbg_kms(&dev_priv->drm, "Port deauth failed.\n");

	/* Force a full re-auth next time; no streams remain registered. */
	dig_port->hdcp_auth_status = false;
	data->k = 0;

	return ret;
}
1762
/*
 * Turn on HDCP 2.2 link encryption: enable signalling via the shim (if
 * it needs it), request encryption once the link reports authenticated,
 * and wait for hardware to confirm the encryption status bit.
 *
 * Returns 0 on success or a negative error (e.g. wait timeout).
 */
static int hdcp2_enable_encryption(struct intel_connector *connector)
{
	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
	struct intel_hdcp *hdcp = &connector->hdcp;
	enum port port = dig_port->base.port;
	enum transcoder cpu_transcoder = hdcp->cpu_transcoder;
	int ret;

	/* Encryption must not already be on at this point. */
	drm_WARN_ON(&dev_priv->drm,
		    intel_de_read(dev_priv, HDCP2_STATUS(dev_priv, cpu_transcoder, port)) &
		    LINK_ENCRYPTION_STATUS);
	if (hdcp->shim->toggle_signalling) {
		ret = hdcp->shim->toggle_signalling(dig_port, cpu_transcoder,
						    true);
		if (ret) {
			drm_err(&dev_priv->drm,
				"Failed to enable HDCP signalling. %d\n",
				ret);
			return ret;
		}
	}

	if (intel_de_read(dev_priv, HDCP2_STATUS(dev_priv, cpu_transcoder, port)) &
	    LINK_AUTH_STATUS) {
		/* Link is Authenticated. Now set for Encryption */
		intel_de_write(dev_priv,
			       HDCP2_CTL(dev_priv, cpu_transcoder, port),
			       intel_de_read(dev_priv, HDCP2_CTL(dev_priv, cpu_transcoder, port)) | CTL_LINK_ENCRYPTION_REQ);
	}

	ret = intel_de_wait_for_set(dev_priv,
				    HDCP2_STATUS(dev_priv, cpu_transcoder,
						 port),
				    LINK_ENCRYPTION_STATUS,
				    HDCP_ENCRYPT_STATUS_CHANGE_TIMEOUT_MS);
	dig_port->hdcp_auth_status = true;

	return ret;
}
1803
/*
 * Turn off HDCP 2.2 link encryption: drop the encryption request bit,
 * wait for hardware to confirm, then disable signalling via the shim.
 *
 * Returns 0 on success, -ETIMEDOUT if the status bit never cleared, or
 * the shim's signalling error.
 */
static int hdcp2_disable_encryption(struct intel_connector *connector)
{
	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
	struct intel_hdcp *hdcp = &connector->hdcp;
	enum port port = dig_port->base.port;
	enum transcoder cpu_transcoder = hdcp->cpu_transcoder;
	int ret;

	/* Encryption is expected to be on when disabling. */
	drm_WARN_ON(&dev_priv->drm, !(intel_de_read(dev_priv, HDCP2_STATUS(dev_priv, cpu_transcoder, port)) &
				      LINK_ENCRYPTION_STATUS));

	intel_de_write(dev_priv, HDCP2_CTL(dev_priv, cpu_transcoder, port),
		       intel_de_read(dev_priv, HDCP2_CTL(dev_priv, cpu_transcoder, port)) & ~CTL_LINK_ENCRYPTION_REQ);

	ret = intel_de_wait_for_clear(dev_priv,
				      HDCP2_STATUS(dev_priv, cpu_transcoder,
						   port),
				      LINK_ENCRYPTION_STATUS,
				      HDCP_ENCRYPT_STATUS_CHANGE_TIMEOUT_MS);
	if (ret == -ETIMEDOUT)
		drm_dbg_kms(&dev_priv->drm, "Disable Encryption Timedout");

	if (hdcp->shim->toggle_signalling) {
		ret = hdcp->shim->toggle_signalling(dig_port, cpu_transcoder,
						    false);
		if (ret) {
			drm_err(&dev_priv->drm,
				"Failed to disable HDCP signalling. %d\n",
				ret);
			return ret;
		}
	}

	return ret;
}
1840
/*
 * Retry wrapper around _hdcp2_propagate_stream_management_info(): no-op
 * for non-repeaters; otherwise up to three attempts, bailing out early
 * when seq_num_m rolls over (which requires a full re-authentication).
 *
 * Returns 0 on success or the error of the last attempt.
 */
static int
hdcp2_propagate_stream_management_info(struct intel_connector *connector)
{
	struct drm_i915_private *i915 = to_i915(connector->base.dev);
	int i, tries = 3, ret;

	if (!connector->hdcp.is_repeater)
		return 0;

	for (i = 0; i < tries; i++) {
		ret = _hdcp2_propagate_stream_management_info(connector);
		if (!ret)
			break;

		/* Lets restart the auth incase of seq_num_m roll over */
		if (connector->hdcp.seq_num_m > HDCP_2_2_SEQ_NUM_MAX) {
			drm_dbg_kms(&i915->drm,
				    "seq_num_m roll over.(%d)\n", ret);
			break;
		}

		drm_dbg_kms(&i915->drm,
			    "HDCP2 stream management %d of %d Failed.(%d)\n",
			    i + 1, tries, ret);
	}

	return ret;
}
1869
/*
 * Authenticate the sink (up to three attempts, skipped entirely when the
 * port is already authenticated), then enable link encryption after the
 * spec-mandated 200ms delay, and finally enable stream encryption. Every
 * failed attempt tears down the firmware session before retrying.
 *
 * Returns 0 on success or the first unrecoverable error.
 */
static int hdcp2_authenticate_and_encrypt(struct intel_connector *connector)
{
	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
	struct drm_i915_private *i915 = to_i915(connector->base.dev);
	int ret = 0, i, tries = 3;

	for (i = 0; i < tries && !dig_port->hdcp_auth_status; i++) {
		ret = hdcp2_authenticate_sink(connector);
		if (!ret) {
			ret = hdcp2_propagate_stream_management_info(connector);
			if (ret) {
				drm_dbg_kms(&i915->drm,
					    "Stream management failed.(%d)\n",
					    ret);
				break;
			}

			ret = hdcp2_authenticate_port(connector);
			if (!ret)
				break;
			drm_dbg_kms(&i915->drm, "HDCP2 port auth failed.(%d)\n",
				    ret);
		}

		/* Clearing the mei hdcp session */
		drm_dbg_kms(&i915->drm, "HDCP2.2 Auth %d of %d Failed.(%d)\n",
			    i + 1, tries, ret);
		if (hdcp2_deauthenticate_port(connector) < 0)
			drm_dbg_kms(&i915->drm, "Port deauth failed.\n");
	}

	if (!ret && !dig_port->hdcp_auth_status) {
		/*
		 * Ensuring the required 200mSec min time interval between
		 * Session Key Exchange and encryption.
		 */
		msleep(HDCP_2_2_DELAY_BEFORE_ENCRYPTION_EN);
		ret = hdcp2_enable_encryption(connector);
		if (ret < 0) {
			drm_dbg_kms(&i915->drm,
				    "Encryption Enable Failed.(%d)\n", ret);
			if (hdcp2_deauthenticate_port(connector) < 0)
				drm_dbg_kms(&i915->drm, "Port deauth failed.\n");
		}
	}

	if (!ret)
		ret = hdcp2_enable_stream_encryption(connector);

	return ret;
}
1921
/*
 * Enable HDCP 2.2 on a connector: register the stream(s) that need
 * encryption (single stream for SST; the common content type for the
 * whole topology on MST), then authenticate and encrypt. Sets
 * hdcp->hdcp2_encrypted on success.
 *
 * Returns 0 on success or a negative error code.
 */
static int _intel_hdcp2_enable(struct intel_connector *connector)
{
	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
	struct drm_i915_private *i915 = to_i915(connector->base.dev);
	struct hdcp_port_data *data = &dig_port->hdcp_port_data;
	struct intel_hdcp *hdcp = &connector->hdcp;
	int ret;

	drm_dbg_kms(&i915->drm, "[%s:%d] HDCP2.2 is being enabled. Type: %d\n",
		    connector->base.name, connector->base.base.id,
		    hdcp->content_type);

	/* Stream which requires encryption */
	if (!intel_encoder_is_mst(intel_attached_encoder(connector))) {
		data->k = 1;
		data->streams[0].stream_type = hdcp->content_type;
	} else {
		ret = intel_hdcp_required_content_stream(dig_port);
		if (ret)
			return ret;
	}

	ret = hdcp2_authenticate_and_encrypt(connector);
	if (ret) {
		drm_dbg_kms(&i915->drm, "HDCP2 Type%d  Enabling Failed. (%d)\n",
			    hdcp->content_type, ret);
		return ret;
	}

	drm_dbg_kms(&i915->drm, "[%s:%d] HDCP2.2 is enabled. Type %d\n",
		    connector->base.name, connector->base.base.id,
		    hdcp->content_type);

	hdcp->hdcp2_encrypted = true;
	return 0;
}
1958
/*
 * Disable HDCP 2.2 on a connector: first stop this connector's stream
 * encryption; if other streams on the port remain protected (MST) and
 * this is not a link-recovery teardown, leave the link itself encrypted.
 * Otherwise disable link encryption, deauthenticate the port in firmware
 * and reset the port's auth bookkeeping.
 *
 * Returns 0 on success or a negative error code.
 */
static int
_intel_hdcp2_disable(struct intel_connector *connector, bool hdcp2_link_recovery)
{
	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
	struct drm_i915_private *i915 = to_i915(connector->base.dev);
	struct hdcp_port_data *data = &dig_port->hdcp_port_data;
	struct intel_hdcp *hdcp = &connector->hdcp;
	int ret;

	drm_dbg_kms(&i915->drm, "[%s:%d] HDCP2.2 is being Disabled\n",
		    connector->base.name, connector->base.base.id);

	if (hdcp->shim->stream_2_2_encryption) {
		ret = hdcp->shim->stream_2_2_encryption(connector, false);
		if (ret) {
			drm_err(&i915->drm, "[%s:%d] Failed to disable HDCP 2.2 stream enc\n",
				connector->base.name, connector->base.base.id);
			return ret;
		}
		drm_dbg_kms(&i915->drm, "HDCP 2.2 transcoder: %s stream encryption disabled\n",
			    transcoder_name(hdcp->stream_transcoder));

		/* Other MST streams still need the link; keep it encrypted. */
		if (dig_port->num_hdcp_streams > 0 && !hdcp2_link_recovery)
			return 0;
	}

	ret = hdcp2_disable_encryption(connector);

	if (hdcp2_deauthenticate_port(connector) < 0)
		drm_dbg_kms(&i915->drm, "Port deauth failed.\n");

	connector->hdcp.hdcp2_encrypted = false;
	dig_port->hdcp_auth_status = false;
	data->k = 0;

	return ret;
}
1996
/* Implements the Link Integrity Check for HDCP2.2 */
static int intel_hdcp2_check_link(struct intel_connector *connector)
{
	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
	struct intel_hdcp *hdcp = &connector->hdcp;
	enum port port = dig_port->base.port;
	enum transcoder cpu_transcoder;
	int ret = 0;

	mutex_lock(&hdcp->mutex);
	mutex_lock(&dig_port->hdcp_mutex);
	cpu_transcoder = hdcp->cpu_transcoder;

	/* hdcp2_check_link is expected only when HDCP2.2 is Enabled */
	if (hdcp->value != DRM_MODE_CONTENT_PROTECTION_ENABLED ||
	    !hdcp->hdcp2_encrypted) {
		ret = -EINVAL;
		goto out;
	}

	/*
	 * HW says encryption stopped while our software state still says
	 * encrypted: tear down and drop the CP property back to DESIRED so
	 * a fresh enable can be requested.
	 */
	if (drm_WARN_ON(&dev_priv->drm,
			!intel_hdcp2_in_use(dev_priv, cpu_transcoder, port))) {
		drm_err(&dev_priv->drm,
			"HDCP2.2 link stopped the encryption, %x\n",
			intel_de_read(dev_priv, HDCP2_STATUS(dev_priv, cpu_transcoder, port)));
		ret = -ENXIO;
		_intel_hdcp2_disable(connector, true);
		intel_hdcp_update_value(connector,
					DRM_MODE_CONTENT_PROTECTION_DESIRED,
					true);
		goto out;
	}

	ret = hdcp->shim->check_2_2_link(dig_port, connector);
	if (ret == HDCP_LINK_PROTECTED) {
		/* Link is healthy; just make sure the property reads ENABLED. */
		if (hdcp->value != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) {
			intel_hdcp_update_value(connector,
					DRM_MODE_CONTENT_PROTECTION_ENABLED,
					true);
		}
		goto out;
	}

	if (ret == HDCP_TOPOLOGY_CHANGE) {
		if (hdcp->value == DRM_MODE_CONTENT_PROTECTION_UNDESIRED)
			goto out;

		drm_dbg_kms(&dev_priv->drm,
			    "HDCP2.2 Downstream topology change\n");
		/* Re-authenticate only the repeater topology, not the port. */
		ret = hdcp2_authenticate_repeater_topology(connector);
		if (!ret) {
			intel_hdcp_update_value(connector,
					DRM_MODE_CONTENT_PROTECTION_ENABLED,
					true);
			goto out;
		}
		/* Topology re-auth failed; fall through to full recovery. */
		drm_dbg_kms(&dev_priv->drm,
			    "[%s:%d] Repeater topology auth failed.(%d)\n",
			    connector->base.name, connector->base.base.id,
			    ret);
	} else {
		drm_dbg_kms(&dev_priv->drm,
			    "[%s:%d] HDCP2.2 link failed, retrying auth\n",
			    connector->base.name, connector->base.base.id);
	}

	/* Full teardown and re-enable, flagged as link recovery. */
	ret = _intel_hdcp2_disable(connector, true);
	if (ret) {
		drm_err(&dev_priv->drm,
			"[%s:%d] Failed to disable hdcp2.2 (%d)\n",
			connector->base.name, connector->base.base.id, ret);
		intel_hdcp_update_value(connector,
				DRM_MODE_CONTENT_PROTECTION_DESIRED, true);
		goto out;
	}

	ret = _intel_hdcp2_enable(connector);
	if (ret) {
		drm_dbg_kms(&dev_priv->drm,
			    "[%s:%d] Failed to enable hdcp2.2 (%d)\n",
			    connector->base.name, connector->base.base.id,
			    ret);
		intel_hdcp_update_value(connector,
					DRM_MODE_CONTENT_PROTECTION_DESIRED,
					true);
		goto out;
	}

out:
	mutex_unlock(&dig_port->hdcp_mutex);
	mutex_unlock(&hdcp->mutex);
	return ret;
}
2091
2092static void intel_hdcp_check_work(struct work_struct *work)
2093{
2094	struct intel_hdcp *hdcp = container_of(to_delayed_work(work),
2095					       struct intel_hdcp,
2096					       check_work);
2097	struct intel_connector *connector = intel_hdcp_to_connector(hdcp);
2098
2099	if (drm_connector_is_unregistered(&connector->base))
2100		return;
2101
2102	if (!intel_hdcp2_check_link(connector))
2103		schedule_delayed_work(&hdcp->check_work,
2104				      DRM_HDCP2_CHECK_PERIOD_MS);
2105	else if (!intel_hdcp_check_link(connector))
2106		schedule_delayed_work(&hdcp->check_work,
2107				      DRM_HDCP_CHECK_PERIOD_MS);
2108}
2109
2110static int i915_hdcp_component_bind(struct device *i915_kdev,
2111				    struct device *mei_kdev, void *data)
2112{
2113	struct drm_i915_private *dev_priv = kdev_to_i915(i915_kdev);
2114
2115	drm_dbg(&dev_priv->drm, "I915 HDCP comp bind\n");
2116	mutex_lock(&dev_priv->hdcp_comp_mutex);
2117	dev_priv->hdcp_master = (struct i915_hdcp_comp_master *)data;
2118	dev_priv->hdcp_master->mei_dev = mei_kdev;
2119	mutex_unlock(&dev_priv->hdcp_comp_mutex);
2120
2121	return 0;
2122}
2123
2124static void i915_hdcp_component_unbind(struct device *i915_kdev,
2125				       struct device *mei_kdev, void *data)
2126{
2127	struct drm_i915_private *dev_priv = kdev_to_i915(i915_kdev);
2128
2129	drm_dbg(&dev_priv->drm, "I915 HDCP comp unbind\n");
2130	mutex_lock(&dev_priv->hdcp_comp_mutex);
2131	dev_priv->hdcp_master = NULL;
2132	mutex_unlock(&dev_priv->hdcp_comp_mutex);
2133}
2134
/* Component ops used by the mei_hdcp component master to (un)bind i915. */
static const struct component_ops i915_hdcp_component_ops = {
	.bind   = i915_hdcp_component_bind,
	.unbind = i915_hdcp_component_unbind,
};
2139
2140static enum mei_fw_ddi intel_get_mei_fw_ddi_index(enum port port)
2141{
2142	switch (port) {
2143	case PORT_A:
2144		return MEI_DDI_A;
2145	case PORT_B ... PORT_F:
2146		return (enum mei_fw_ddi)port;
2147	default:
2148		return MEI_DDI_INVALID_PORT;
2149	}
2150}
2151
2152static enum mei_fw_tc intel_get_mei_fw_tc(enum transcoder cpu_transcoder)
2153{
2154	switch (cpu_transcoder) {
2155	case TRANSCODER_A ... TRANSCODER_D:
2156		return (enum mei_fw_tc)(cpu_transcoder | 0x10);
2157	default: /* eDP, DSI TRANSCODERS are non HDCP capable */
2158		return MEI_INVALID_TRANSCODER;
2159	}
2160}
2161
2162static int initialize_hdcp_port_data(struct intel_connector *connector,
2163				     struct intel_digital_port *dig_port,
2164				     const struct intel_hdcp_shim *shim)
2165{
2166	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
2167	struct hdcp_port_data *data = &dig_port->hdcp_port_data;
2168	struct intel_hdcp *hdcp = &connector->hdcp;
2169	enum port port = dig_port->base.port;
2170
2171	if (DISPLAY_VER(dev_priv) < 12)
2172		data->fw_ddi = intel_get_mei_fw_ddi_index(port);
2173	else
2174		/*
2175		 * As per ME FW API expectation, for GEN 12+, fw_ddi is filled
2176		 * with zero(INVALID PORT index).
2177		 */
2178		data->fw_ddi = MEI_DDI_INVALID_PORT;
2179
2180	/*
2181	 * As associated transcoder is set and modified at modeset, here fw_tc
2182	 * is initialized to zero (invalid transcoder index). This will be
2183	 * retained for <Gen12 forever.
2184	 */
2185	data->fw_tc = MEI_INVALID_TRANSCODER;
2186
2187	data->port_type = (u8)HDCP_PORT_TYPE_INTEGRATED;
2188	data->protocol = (u8)shim->protocol;
2189
2190	if (!data->streams)
2191		data->streams = kcalloc(INTEL_NUM_PIPES(dev_priv),
2192					sizeof(struct hdcp2_streamid_type),
2193					GFP_KERNEL);
2194	if (!data->streams) {
2195		drm_err(&dev_priv->drm, "Out of Memory\n");
2196		return -ENOMEM;
2197	}
2198	/* For SST */
2199	data->streams[0].stream_id = 0;
2200	data->streams[0].stream_type = hdcp->content_type;
2201
2202	return 0;
2203}
2204
2205static bool is_hdcp2_supported(struct drm_i915_private *dev_priv)
2206{
2207	if (!IS_ENABLED(CONFIG_INTEL_MEI_HDCP))
2208		return false;
2209
2210	return (DISPLAY_VER(dev_priv) >= 10 ||
2211		IS_KABYLAKE(dev_priv) ||
2212		IS_COFFEELAKE(dev_priv) ||
2213		IS_COMETLAKE(dev_priv));
2214}
2215
2216void intel_hdcp_component_init(struct drm_i915_private *dev_priv)
2217{
2218	int ret;
2219
2220	if (!is_hdcp2_supported(dev_priv))
2221		return;
2222
2223	mutex_lock(&dev_priv->hdcp_comp_mutex);
2224	drm_WARN_ON(&dev_priv->drm, dev_priv->hdcp_comp_added);
2225
2226	dev_priv->hdcp_comp_added = true;
2227	mutex_unlock(&dev_priv->hdcp_comp_mutex);
2228	ret = component_add_typed(dev_priv->drm.dev, &i915_hdcp_component_ops,
2229				  I915_COMPONENT_HDCP);
2230	if (ret < 0) {
2231		drm_dbg_kms(&dev_priv->drm, "Failed at component add(%d)\n",
2232			    ret);
2233		mutex_lock(&dev_priv->hdcp_comp_mutex);
2234		dev_priv->hdcp_comp_added = false;
2235		mutex_unlock(&dev_priv->hdcp_comp_mutex);
2236		return;
2237	}
2238}
2239
2240static void intel_hdcp2_init(struct intel_connector *connector,
2241			     struct intel_digital_port *dig_port,
2242			     const struct intel_hdcp_shim *shim)
2243{
2244	struct drm_i915_private *i915 = to_i915(connector->base.dev);
2245	struct intel_hdcp *hdcp = &connector->hdcp;
2246	int ret;
2247
2248	ret = initialize_hdcp_port_data(connector, dig_port, shim);
2249	if (ret) {
2250		drm_dbg_kms(&i915->drm, "Mei hdcp data init failed\n");
2251		return;
2252	}
2253
2254	hdcp->hdcp2_supported = true;
2255}
2256
2257int intel_hdcp_init(struct intel_connector *connector,
2258		    struct intel_digital_port *dig_port,
2259		    const struct intel_hdcp_shim *shim)
2260{
2261	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
2262	struct intel_hdcp *hdcp = &connector->hdcp;
2263	int ret;
2264
2265	if (!shim)
2266		return -EINVAL;
2267
2268	if (is_hdcp2_supported(dev_priv))
2269		intel_hdcp2_init(connector, dig_port, shim);
2270
2271	ret =
2272	drm_connector_attach_content_protection_property(&connector->base,
2273							 hdcp->hdcp2_supported);
2274	if (ret) {
2275		hdcp->hdcp2_supported = false;
2276		kfree(dig_port->hdcp_port_data.streams);
2277		return ret;
2278	}
2279
2280	hdcp->shim = shim;
2281	mutex_init(&hdcp->mutex);
2282	INIT_DELAYED_WORK(&hdcp->check_work, intel_hdcp_check_work);
2283	INIT_WORK(&hdcp->prop_work, intel_hdcp_prop_work);
2284	init_waitqueue_head(&hdcp->cp_irq_queue);
2285
2286	return 0;
2287}
2288
/*
 * Enable HDCP for @connector, preferring HDCP2.2 and falling back to
 * HDCP1.4 when the requested @content_type allows it. On success the
 * periodic link check is scheduled and the CP property moves to ENABLED.
 */
int intel_hdcp_enable(struct intel_connector *connector,
		      const struct intel_crtc_state *pipe_config, u8 content_type)
{
	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
	struct intel_hdcp *hdcp = &connector->hdcp;
	unsigned long check_link_interval = DRM_HDCP_CHECK_PERIOD_MS;
	int ret = -EINVAL;

	if (!hdcp->shim)
		return -ENOENT;

	if (!connector->encoder) {
		drm_err(&dev_priv->drm, "[%s:%d] encoder is not initialized\n",
			connector->base.name, connector->base.base.id);
		return -ENODEV;
	}

	mutex_lock(&hdcp->mutex);
	mutex_lock(&dig_port->hdcp_mutex);
	drm_WARN_ON(&dev_priv->drm,
		    hdcp->value == DRM_MODE_CONTENT_PROTECTION_ENABLED);
	hdcp->content_type = content_type;

	/* MST encrypts on the master transcoder, streams on their own. */
	if (intel_crtc_has_type(pipe_config, INTEL_OUTPUT_DP_MST)) {
		hdcp->cpu_transcoder = pipe_config->mst_master_transcoder;
		hdcp->stream_transcoder = pipe_config->cpu_transcoder;
	} else {
		hdcp->cpu_transcoder = pipe_config->cpu_transcoder;
		hdcp->stream_transcoder = INVALID_TRANSCODER;
	}

	/* Gen12+ firmware addresses the link by transcoder, not DDI. */
	if (DISPLAY_VER(dev_priv) >= 12)
		dig_port->hdcp_port_data.fw_tc = intel_get_mei_fw_tc(hdcp->cpu_transcoder);

	/*
	 * Considering that HDCP2.2 is more secure than HDCP1.4, If the setup
	 * is capable of HDCP2.2, it is preferred to use HDCP2.2.
	 */
	if (intel_hdcp2_capable(connector)) {
		ret = _intel_hdcp2_enable(connector);
		if (!ret)
			check_link_interval = DRM_HDCP2_CHECK_PERIOD_MS;
	}

	/*
	 * When HDCP2.2 fails and Content Type is not Type1, HDCP1.4 will
	 * be attempted.
	 */
	if (ret && intel_hdcp_capable(connector) &&
	    hdcp->content_type != DRM_MODE_HDCP_CONTENT_TYPE1) {
		ret = _intel_hdcp_enable(connector);
	}

	if (!ret) {
		schedule_delayed_work(&hdcp->check_work, check_link_interval);
		intel_hdcp_update_value(connector,
					DRM_MODE_CONTENT_PROTECTION_ENABLED,
					true);
	}

	mutex_unlock(&dig_port->hdcp_mutex);
	mutex_unlock(&hdcp->mutex);
	return ret;
}
2354
/*
 * Disable HDCP for @connector: mark the CP property UNDESIRED and tear
 * down whichever protocol (2.2 or 1.4) is currently encrypting.
 */
int intel_hdcp_disable(struct intel_connector *connector)
{
	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
	struct intel_hdcp *hdcp = &connector->hdcp;
	int ret = 0;

	/* No shim means HDCP was never initialized on this connector. */
	if (!hdcp->shim)
		return -ENOENT;

	mutex_lock(&hdcp->mutex);
	mutex_lock(&dig_port->hdcp_mutex);

	if (hdcp->value == DRM_MODE_CONTENT_PROTECTION_UNDESIRED)
		goto out;

	intel_hdcp_update_value(connector,
				DRM_MODE_CONTENT_PROTECTION_UNDESIRED, false);
	if (hdcp->hdcp2_encrypted)
		ret = _intel_hdcp2_disable(connector, false);
	else if (hdcp->hdcp_encrypted)
		ret = _intel_hdcp_disable(connector);

out:
	mutex_unlock(&dig_port->hdcp_mutex);
	mutex_unlock(&hdcp->mutex);
	/*
	 * Cancel outside the locks: check_work takes hdcp->mutex (see
	 * intel_hdcp2_check_link), so a _sync cancel while holding it
	 * could deadlock.
	 */
	cancel_delayed_work_sync(&hdcp->check_work);
	return ret;
}
2383
/*
 * Atomic commit hook: reconcile the connector's HDCP state with the
 * committed content_protection / hdcp_content_type uapi state, disabling,
 * re-enabling, or just refreshing the property as needed.
 */
void intel_hdcp_update_pipe(struct intel_atomic_state *state,
			    struct intel_encoder *encoder,
			    const struct intel_crtc_state *crtc_state,
			    const struct drm_connector_state *conn_state)
{
	struct intel_connector *connector =
				to_intel_connector(conn_state->connector);
	struct intel_hdcp *hdcp = &connector->hdcp;
	bool content_protection_type_changed, desired_and_not_enabled = false;

	if (!connector->hdcp.shim)
		return;

	/* A type change only matters while protection is not UNDESIRED. */
	content_protection_type_changed =
		(conn_state->hdcp_content_type != hdcp->content_type &&
		 conn_state->content_protection !=
		 DRM_MODE_CONTENT_PROTECTION_UNDESIRED);

	/*
	 * During the HDCP encryption session if Type change is requested,
	 * disable the HDCP and reenable it with new TYPE value.
	 */
	if (conn_state->content_protection ==
	    DRM_MODE_CONTENT_PROTECTION_UNDESIRED ||
	    content_protection_type_changed)
		intel_hdcp_disable(connector);

	/*
	 * Mark the hdcp state as DESIRED after the hdcp disable of type
	 * change procedure.
	 */
	if (content_protection_type_changed) {
		mutex_lock(&hdcp->mutex);
		hdcp->value = DRM_MODE_CONTENT_PROTECTION_DESIRED;
		/* Reference dropped by prop_work once it has run. */
		drm_connector_get(&connector->base);
		schedule_work(&hdcp->prop_work);
		mutex_unlock(&hdcp->mutex);
	}

	if (conn_state->content_protection ==
	    DRM_MODE_CONTENT_PROTECTION_DESIRED) {
		mutex_lock(&hdcp->mutex);
		/* Avoid enabling hdcp, if it already ENABLED */
		desired_and_not_enabled =
			hdcp->value != DRM_MODE_CONTENT_PROTECTION_ENABLED;
		mutex_unlock(&hdcp->mutex);
		/*
		 * If HDCP already ENABLED and CP property is DESIRED, schedule
		 * prop_work to update correct CP property to user space.
		 */
		if (!desired_and_not_enabled && !content_protection_type_changed) {
			drm_connector_get(&connector->base);
			schedule_work(&hdcp->prop_work);
		}
	}

	if (desired_and_not_enabled || content_protection_type_changed)
		intel_hdcp_enable(connector,
				  crtc_state,
				  (u8)conn_state->hdcp_content_type);
}
2445
2446void intel_hdcp_component_fini(struct drm_i915_private *dev_priv)
2447{
2448	mutex_lock(&dev_priv->hdcp_comp_mutex);
2449	if (!dev_priv->hdcp_comp_added) {
2450		mutex_unlock(&dev_priv->hdcp_comp_mutex);
2451		return;
2452	}
2453
2454	dev_priv->hdcp_comp_added = false;
2455	mutex_unlock(&dev_priv->hdcp_comp_mutex);
2456
2457	component_del(dev_priv->drm.dev, &i915_hdcp_component_ops);
2458}
2459
/*
 * Final per-connector HDCP teardown, called on the connector destroy path
 * after the connector has been unregistered.
 */
void intel_hdcp_cleanup(struct intel_connector *connector)
{
	struct intel_hdcp *hdcp = &connector->hdcp;

	if (!hdcp->shim)
		return;

	/*
	 * If the connector is registered, it's possible userspace could kick
	 * off another HDCP enable, which would re-spawn the workers.
	 */
	drm_WARN_ON(connector->base.dev,
		connector->base.registration_state == DRM_CONNECTOR_REGISTERED);

	/*
	 * Now that the connector is not registered, check_work won't be run,
	 * but cancel any outstanding instances of it
	 */
	cancel_delayed_work_sync(&hdcp->check_work);

	/*
	 * We don't cancel prop_work in the same way as check_work since it
	 * requires connection_mutex which could be held while calling this
	 * function. Instead, we rely on the connector references grabbed before
	 * scheduling prop_work to ensure the connector is alive when prop_work
	 * is run. So if we're in the destroy path (which is where this
	 * function should be called), we're "guaranteed" that prop_work is not
	 * active (tl;dr This Should Never Happen).
	 */
	drm_WARN_ON(connector->base.dev, work_pending(&hdcp->prop_work));

	/* Clearing the shim marks HDCP as torn down for this connector. */
	mutex_lock(&hdcp->mutex);
	hdcp->shim = NULL;
	mutex_unlock(&hdcp->mutex);
}
2495
/*
 * Atomic check hook for the content protection property: normalize the
 * requested uapi state and force a modeset when the HDCP state (or content
 * type) actually needs to change.
 */
void intel_hdcp_atomic_check(struct drm_connector *connector,
			     struct drm_connector_state *old_state,
			     struct drm_connector_state *new_state)
{
	u64 old_cp = old_state->content_protection;
	u64 new_cp = new_state->content_protection;
	struct drm_crtc_state *crtc_state;

	if (!new_state->crtc) {
		/*
		 * If the connector is being disabled with CP enabled, mark it
		 * desired so it's re-enabled when the connector is brought back
		 */
		if (old_cp == DRM_MODE_CONTENT_PROTECTION_ENABLED)
			new_state->content_protection =
				DRM_MODE_CONTENT_PROTECTION_DESIRED;
		return;
	}

	crtc_state = drm_atomic_get_new_crtc_state(new_state->state,
						   new_state->crtc);
	/*
	 * Fix the HDCP uapi content protection state in case of modeset.
	 * FIXME: As per HDCP content protection property uapi doc, an uevent()
	 * need to be sent if there is transition from ENABLED->DESIRED.
	 */
	if (drm_atomic_crtc_needs_modeset(crtc_state) &&
	    (old_cp == DRM_MODE_CONTENT_PROTECTION_ENABLED &&
	    new_cp != DRM_MODE_CONTENT_PROTECTION_UNDESIRED))
		new_state->content_protection =
			DRM_MODE_CONTENT_PROTECTION_DESIRED;

	/*
	 * Nothing to do if the state didn't change, or HDCP was activated since
	 * the last commit. And also no change in hdcp content type.
	 */
	if (old_cp == new_cp ||
	    (old_cp == DRM_MODE_CONTENT_PROTECTION_DESIRED &&
	     new_cp == DRM_MODE_CONTENT_PROTECTION_ENABLED)) {
		if (old_state->hdcp_content_type ==
				new_state->hdcp_content_type)
			return;
	}

	/* Something changed: force a full modeset to (re)negotiate HDCP. */
	crtc_state->mode_changed = true;
}
2542
2543/* Handles the CP_IRQ raised from the DP HDCP sink */
2544void intel_hdcp_handle_cp_irq(struct intel_connector *connector)
2545{
2546	struct intel_hdcp *hdcp = &connector->hdcp;
2547
2548	if (!hdcp->shim)
2549		return;
2550
2551	atomic_inc(&connector->hdcp.cp_irq_count);
2552	wake_up_all(&connector->hdcp.cp_irq_queue);
2553
2554	schedule_delayed_work(&hdcp->check_work, 0);
2555}