Linux Audio

Check our new training course

Loading...
v5.14.15
   1/* SPDX-License-Identifier: MIT */
   2/*
   3 * Copyright (C) 2017 Google, Inc.
   4 * Copyright (C) 2017-2019, Intel Corporation.
   5 *
   6 * Authors:
   7 * Sean Paul <seanpaul@chromium.org>
   8 * Ramalingam C <ramalingam.c@intel.com>
   9 */
  10
  11#include <linux/component.h>
  12#include <linux/i2c.h>
  13#include <linux/random.h>
  14
  15#include <drm/drm_hdcp.h>
  16#include <drm/i915_component.h>
  17
  18#include "i915_drv.h"
  19#include "i915_reg.h"
  20#include "intel_display_power.h"
  21#include "intel_de.h"
 
 
  22#include "intel_display_types.h"
  23#include "intel_hdcp.h"
  24#include "intel_sideband.h"
  25#include "intel_connector.h"
 
 
  26
  27#define KEY_LOAD_TRIES	5
  28#define HDCP2_LC_RETRY_CNT			3
  29
  30static int intel_conn_to_vcpi(struct intel_connector *connector)
 
 
 
  31{
  32	/* For HDMI this is forced to be 0x0. For DP SST also this is 0x0. */
  33	return connector->port	? connector->port->vcpi.vcpi : 0;
 
 
 
 
 
 
 
 
 
 
 
 
 
 
  34}
  35
  36static bool
  37intel_streams_type1_capable(struct intel_connector *connector)
  38{
  39	const struct intel_hdcp_shim *shim = connector->hdcp.shim;
  40	bool capable = false;
 
 
  41
  42	if (!shim)
  43		return capable;
 
 
  44
  45	if (shim->streams_type1_capable)
  46		shim->streams_type1_capable(connector, &capable);
 
 
 
  47
  48	return capable;
 
 
 
 
 
 
  49}
  50
/*
 * intel_hdcp_required_content_stream selects the highest common possible HDCP
 * content_type for all streams in a DP MST topology because the security f/w
 * doesn't have any provision to mark the content_type for each stream
 * separately; it marks all available streams with the content_type provided
 * at the time of port authentication. This may prohibit userspace from using
 * type1 content on an HDCP 2.2 capable sink because other sinks in the DP MST
 * topology are not capable of HDCP 2.2. Though it is not compulsory, the
 * security fw should change its policy to mark different content_types for
 * different streams.
 */
static int
intel_hdcp_required_content_stream(struct intel_digital_port *dig_port)
{
	struct drm_connector_list_iter conn_iter;
	struct intel_digital_port *conn_dig_port;
	struct intel_connector *connector;
	struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
	struct hdcp_port_data *data = &dig_port->hdcp_port_data;
	bool enforce_type0 = false;
	int k;

	/* Rebuild the stream list from scratch. */
	data->k = 0;

	/* Port already authenticated: stream data is already in place. */
	if (dig_port->hdcp_auth_status)
		return 0;

	/* Collect the VCPI of every connected MST stream on this port. */
	drm_connector_list_iter_begin(&i915->drm, &conn_iter);
	for_each_intel_connector_iter(connector, &conn_iter) {
		if (connector->base.status == connector_status_disconnected)
			continue;

		if (!intel_encoder_is_mst(intel_attached_encoder(connector)))
			continue;

		conn_dig_port = intel_attached_dig_port(connector);
		if (conn_dig_port != dig_port)
			continue;

		/* A single non-type1-capable sink downgrades every stream. */
		if (!enforce_type0 && !intel_streams_type1_capable(connector))
			enforce_type0 = true;

		data->streams[data->k].stream_id = intel_conn_to_vcpi(connector);
		data->k++;

		/* if there is only one active stream */
		if (dig_port->dp.active_mst_links <= 1)
			break;
	}
	drm_connector_list_iter_end(&conn_iter);

	/* Sanity: at least one stream, and no more than one per pipe. */
	if (drm_WARN_ON(&i915->drm, data->k > INTEL_NUM_PIPES(i915) || data->k == 0))
		return -EINVAL;

	/*
	 * Apply common protection level across all streams in DP MST Topology.
	 * Use highest supported content type for all streams in DP MST Topology.
	 */
	for (k = 0; k < data->k; k++)
		data->streams[k].stream_type =
			enforce_type0 ? DRM_MODE_HDCP_CONTENT_TYPE0 : DRM_MODE_HDCP_CONTENT_TYPE1;

	return 0;
}
 114
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 115static
 116bool intel_hdcp_is_ksv_valid(u8 *ksv)
 117{
 118	int i, ones = 0;
 119	/* KSV has 20 1's and 20 0's */
 120	for (i = 0; i < DRM_HDCP_KSV_LEN; i++)
 121		ones += hweight8(ksv[i]);
 122	if (ones != 20)
 123		return false;
 124
 125	return true;
 126}
 127
 128static
 129int intel_hdcp_read_valid_bksv(struct intel_digital_port *dig_port,
 130			       const struct intel_hdcp_shim *shim, u8 *bksv)
 131{
 132	struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
 133	int ret, i, tries = 2;
 134
 135	/* HDCP spec states that we must retry the bksv if it is invalid */
 136	for (i = 0; i < tries; i++) {
 137		ret = shim->read_bksv(dig_port, bksv);
 138		if (ret)
 139			return ret;
 140		if (intel_hdcp_is_ksv_valid(bksv))
 141			break;
 142	}
 143	if (i == tries) {
 144		drm_dbg_kms(&i915->drm, "Bksv is invalid\n");
 145		return -ENODEV;
 146	}
 147
 148	return 0;
 149}
 150
 151/* Is HDCP1.4 capable on Platform and Sink */
 152bool intel_hdcp_capable(struct intel_connector *connector)
 153{
 154	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
 155	const struct intel_hdcp_shim *shim = connector->hdcp.shim;
 156	bool capable = false;
 157	u8 bksv[5];
 158
 
 
 
 
 
 159	if (!shim)
 160		return capable;
 161
 162	if (shim->hdcp_capable) {
 163		shim->hdcp_capable(dig_port, &capable);
 164	} else {
 165		if (!intel_hdcp_read_valid_bksv(dig_port, shim, bksv))
 166			capable = true;
 167	}
 168
 169	return capable;
 170}
 171
 172/* Is HDCP2.2 capable on Platform and Sink */
 173bool intel_hdcp2_capable(struct intel_connector *connector)
 
 
 
 174{
 175	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
 176	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
 177	struct intel_hdcp *hdcp = &connector->hdcp;
 178	bool capable = false;
 179
 180	/* I915 support for HDCP2.2 */
 181	if (!hdcp->hdcp2_supported)
 182		return false;
 183
 184	/* MEI interface is solid */
 185	mutex_lock(&dev_priv->hdcp_comp_mutex);
 186	if (!dev_priv->hdcp_comp_added ||  !dev_priv->hdcp_master) {
 187		mutex_unlock(&dev_priv->hdcp_comp_mutex);
 
 
 
 
 
 
 188		return false;
 189	}
 190	mutex_unlock(&dev_priv->hdcp_comp_mutex);
 
 
 
 
 
 
 
 
 
 
 
 
 191
 192	/* Sink's capability for HDCP2.2 */
 193	hdcp->shim->hdcp_2_2_capable(dig_port, &capable);
 194
 195	return capable;
 196}
 197
 198static bool intel_hdcp_in_use(struct drm_i915_private *dev_priv,
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 199			      enum transcoder cpu_transcoder, enum port port)
 200{
 201	return intel_de_read(dev_priv,
 202	                     HDCP_STATUS(dev_priv, cpu_transcoder, port)) &
 203	       HDCP_STATUS_ENC;
 204}
 205
 206static bool intel_hdcp2_in_use(struct drm_i915_private *dev_priv,
 207			       enum transcoder cpu_transcoder, enum port port)
 208{
 209	return intel_de_read(dev_priv,
 210	                     HDCP2_STATUS(dev_priv, cpu_transcoder, port)) &
 211	       LINK_ENCRYPTION_STATUS;
 212}
 213
/*
 * Wait for the repeater's KSV FIFO to become ready, polling through the
 * shim. Returns 0 on ready, the shim's error on read failure, -ETIMEDOUT
 * on timeout, or __wait_for()'s own error.
 */
static int intel_hdcp_poll_ksv_fifo(struct intel_digital_port *dig_port,
				    const struct intel_hdcp_shim *shim)
{
	int ret, read_ret;
	bool ksv_ready;

	/* Poll for ksv list ready (spec says max time allowed is 5s) */
	ret = __wait_for(read_ret = shim->read_ksv_ready(dig_port,
							 &ksv_ready),
			 read_ret || ksv_ready, 5 * 1000 * 1000, 1000,
			 100 * 1000);
	if (ret)
		return ret;
	/* The poll also ends on a read error; surface it to the caller. */
	if (read_ret)
		return read_ret;
	if (!ksv_ready)
		return -ETIMEDOUT;

	return 0;
}
 234
/*
 * Check whether the platform is currently in a state where the HDCP 1.4
 * keys can be loaded, i.e. the relevant display power well is enabled.
 */
static bool hdcp_key_loadable(struct drm_i915_private *dev_priv)
{
	enum i915_power_well_id id;
	intel_wakeref_t wakeref;
	bool enabled = false;

	/*
	 * On HSW and BDW, Display HW loads the Key as soon as Display resumes.
	 * On all BXT+, SW can load the keys only when the PW#1 is turned on.
	 */
	if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
		id = HSW_DISP_PW_GLOBAL;
	else
		id = SKL_DISP_PW_1;

	/* PG1 (power well #1) needs to be enabled */
	with_intel_runtime_pm(&dev_priv->runtime_pm, wakeref)
		enabled = intel_display_power_well_is_enabled(dev_priv, id);

	/*
	 * Another req for hdcp key loadability is enabled state of pll for
	 * cdclk. Without active crtc we wont land here. So we are assuming that
	 * cdclk is already on.
	 */

	return enabled;
}
 262
 263static void intel_hdcp_clear_keys(struct drm_i915_private *dev_priv)
 264{
 265	intel_de_write(dev_priv, HDCP_KEY_CONF, HDCP_CLEAR_KEYS_TRIGGER);
 266	intel_de_write(dev_priv, HDCP_KEY_STATUS,
 267		       HDCP_KEY_LOAD_DONE | HDCP_KEY_LOAD_STATUS | HDCP_FUSE_IN_PROGRESS | HDCP_FUSE_ERROR | HDCP_FUSE_DONE);
 268}
 269
/*
 * Load the HDCP 1.4 keys into display hardware, using the platform's
 * required trigger mechanism, and send Aksv to the PCH on success.
 * Returns 0 when keys are loaded, -ENXIO when the hardware reports a
 * bad key state, or an error from the pcode/register wait helpers.
 */
static int intel_hdcp_load_keys(struct drm_i915_private *dev_priv)
{
	int ret;
	u32 val;

	/* Nothing to do if the keys are already loaded and valid. */
	val = intel_de_read(dev_priv, HDCP_KEY_STATUS);
	if ((val & HDCP_KEY_LOAD_DONE) && (val & HDCP_KEY_LOAD_STATUS))
		return 0;

	/*
	 * On HSW and BDW HW loads the HDCP1.4 Key when Display comes
	 * out of reset. So if Key is not already loaded, its an error state.
	 */
	if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
		if (!(intel_de_read(dev_priv, HDCP_KEY_STATUS) & HDCP_KEY_LOAD_DONE))
			return -ENXIO;

	/*
	 * Initiate loading the HDCP key from fuses.
	 *
	 * BXT+ platforms, HDCP key needs to be loaded by SW. Only display
	 * version 9 platforms (minus BXT) differ in the key load trigger
	 * process from other platforms. These platforms use the GT Driver
	 * Mailbox interface.
	 */
	if (DISPLAY_VER(dev_priv) == 9 && !IS_BROXTON(dev_priv)) {
		ret = sandybridge_pcode_write(dev_priv,
					      SKL_PCODE_LOAD_HDCP_KEYS, 1);
		if (ret) {
			drm_err(&dev_priv->drm,
				"Failed to initiate HDCP key load (%d)\n",
				ret);
			return ret;
		}
	} else {
		intel_de_write(dev_priv, HDCP_KEY_CONF, HDCP_KEY_LOAD_TRIGGER);
	}

	/* Wait for the keys to load (500us) */
	ret = __intel_wait_for_register(&dev_priv->uncore, HDCP_KEY_STATUS,
					HDCP_KEY_LOAD_DONE, HDCP_KEY_LOAD_DONE,
					10, 1, &val);
	if (ret)
		return ret;
	else if (!(val & HDCP_KEY_LOAD_STATUS))
		return -ENXIO;

	/* Send Aksv over to PCH display for use in authentication */
	intel_de_write(dev_priv, HDCP_KEY_CONF, HDCP_AKSV_SEND_TRIGGER);

	return 0;
}
 322
 323/* Returns updated SHA-1 index */
 324static int intel_write_sha_text(struct drm_i915_private *dev_priv, u32 sha_text)
 325{
 326	intel_de_write(dev_priv, HDCP_SHA_TEXT, sha_text);
 327	if (intel_de_wait_for_set(dev_priv, HDCP_REP_CTL, HDCP_SHA1_READY, 1)) {
 328		drm_err(&dev_priv->drm, "Timed out waiting for SHA1 ready\n");
 329		return -ETIMEDOUT;
 330	}
 331	return 0;
 332}
 333
/*
 * Return the HDCP_REP_CTL bits (repeater-present | SHA1 M0 select) for the
 * given transcoder (display ver >= 12) or DDI port (older platforms).
 *
 * NOTE(review): the error paths return -EINVAL through a u32 return type,
 * which a caller cannot distinguish from a mask with most bits set -
 * confirm callers never reach the default cases with a valid config.
 */
static
u32 intel_hdcp_get_repeater_ctl(struct drm_i915_private *dev_priv,
				enum transcoder cpu_transcoder, enum port port)
{
	/* Display ver 12+ selects by transcoder rather than DDI port. */
	if (DISPLAY_VER(dev_priv) >= 12) {
		switch (cpu_transcoder) {
		case TRANSCODER_A:
			return HDCP_TRANSA_REP_PRESENT |
			       HDCP_TRANSA_SHA1_M0;
		case TRANSCODER_B:
			return HDCP_TRANSB_REP_PRESENT |
			       HDCP_TRANSB_SHA1_M0;
		case TRANSCODER_C:
			return HDCP_TRANSC_REP_PRESENT |
			       HDCP_TRANSC_SHA1_M0;
		case TRANSCODER_D:
			return HDCP_TRANSD_REP_PRESENT |
			       HDCP_TRANSD_SHA1_M0;
		default:
			drm_err(&dev_priv->drm, "Unknown transcoder %d\n",
				cpu_transcoder);
			return -EINVAL;
		}
	}

	switch (port) {
	case PORT_A:
		return HDCP_DDIA_REP_PRESENT | HDCP_DDIA_SHA1_M0;
	case PORT_B:
		return HDCP_DDIB_REP_PRESENT | HDCP_DDIB_SHA1_M0;
	case PORT_C:
		return HDCP_DDIC_REP_PRESENT | HDCP_DDIC_SHA1_M0;
	case PORT_D:
		return HDCP_DDID_REP_PRESENT | HDCP_DDID_SHA1_M0;
	case PORT_E:
		return HDCP_DDIE_REP_PRESENT | HDCP_DDIE_SHA1_M0;
	default:
		drm_err(&dev_priv->drm, "Unknown port %d\n", port);
		return -EINVAL;
	}
}
 375
/*
 * Validate the repeater's V' against the hardware-computed SHA-1 over the
 * downstream KSV list, BINFO/BSTATUS and M0 (HDCP 1.4 Part 2).
 *
 * The KSV/BSTATUS/M0 byte stream is fed to the hardware 32 bits at a time
 * via HDCP_SHA_TEXT; the exact write widths and ordering below follow the
 * programming guide and must not be reordered.
 *
 * Returns 0 when V' matches, -ETIMEDOUT on hardware timeouts, -ENXIO on a
 * SHA-1 mismatch, -EINVAL on an impossible leftover count, or a shim error.
 */
static
int intel_hdcp_validate_v_prime(struct intel_connector *connector,
				const struct intel_hdcp_shim *shim,
				u8 *ksv_fifo, u8 num_downstream, u8 *bstatus)
{
	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
	enum transcoder cpu_transcoder = connector->hdcp.cpu_transcoder;
	enum port port = dig_port->base.port;
	u32 vprime, sha_text, sha_leftovers, rep_ctl;
	int ret, i, j, sha_idx;

	/* Process V' values from the receiver */
	for (i = 0; i < DRM_HDCP_V_PRIME_NUM_PARTS; i++) {
		ret = shim->read_v_prime_part(dig_port, i, &vprime);
		if (ret)
			return ret;
		intel_de_write(dev_priv, HDCP_SHA_V_PRIME(i), vprime);
	}

	/*
	 * We need to write the concatenation of all device KSVs, BINFO (DP) ||
	 * BSTATUS (HDMI), and M0 (which is added via HDCP_REP_CTL). This byte
	 * stream is written via the HDCP_SHA_TEXT register in 32-bit
	 * increments. Every 64 bytes, we need to write HDCP_REP_CTL again. This
	 * index will keep track of our progress through the 64 bytes as well as
	 * helping us work the 40-bit KSVs through our 32-bit register.
	 *
	 * NOTE: data passed via HDCP_SHA_TEXT should be big-endian
	 */
	sha_idx = 0;
	sha_text = 0;
	sha_leftovers = 0;
	rep_ctl = intel_hdcp_get_repeater_ctl(dev_priv, cpu_transcoder, port);
	intel_de_write(dev_priv, HDCP_REP_CTL, rep_ctl | HDCP_SHA1_TEXT_32);
	for (i = 0; i < num_downstream; i++) {
		unsigned int sha_empty;
		u8 *ksv = &ksv_fifo[i * DRM_HDCP_KSV_LEN];

		/* Fill up the empty slots in sha_text and write it out */
		sha_empty = sizeof(sha_text) - sha_leftovers;
		for (j = 0; j < sha_empty; j++) {
			u8 off = ((sizeof(sha_text) - j - 1 - sha_leftovers) * 8);
			sha_text |= ksv[j] << off;
		}

		ret = intel_write_sha_text(dev_priv, sha_text);
		if (ret < 0)
			return ret;

		/* Programming guide writes this every 64 bytes */
		sha_idx += sizeof(sha_text);
		if (!(sha_idx % 64))
			intel_de_write(dev_priv, HDCP_REP_CTL,
				       rep_ctl | HDCP_SHA1_TEXT_32);

		/* Store the leftover bytes from the ksv in sha_text */
		sha_leftovers = DRM_HDCP_KSV_LEN - sha_empty;
		sha_text = 0;
		for (j = 0; j < sha_leftovers; j++)
			sha_text |= ksv[sha_empty + j] <<
					((sizeof(sha_text) - j - 1) * 8);

		/*
		 * If we still have room in sha_text for more data, continue.
		 * Otherwise, write it out immediately.
		 */
		if (sizeof(sha_text) > sha_leftovers)
			continue;

		ret = intel_write_sha_text(dev_priv, sha_text);
		if (ret < 0)
			return ret;
		sha_leftovers = 0;
		sha_text = 0;
		sha_idx += sizeof(sha_text);
	}

	/*
	 * We need to write BINFO/BSTATUS, and M0 now. Depending on how many
	 * bytes are leftover from the last ksv, we might be able to fit them
	 * all in sha_text (first 2 cases), or we might need to split them up
	 * into 2 writes (last 2 cases).
	 */
	if (sha_leftovers == 0) {
		/* Write 16 bits of text, 16 bits of M0 */
		intel_de_write(dev_priv, HDCP_REP_CTL,
			       rep_ctl | HDCP_SHA1_TEXT_16);
		ret = intel_write_sha_text(dev_priv,
					   bstatus[0] << 8 | bstatus[1]);
		if (ret < 0)
			return ret;
		sha_idx += sizeof(sha_text);

		/* Write 32 bits of M0 */
		intel_de_write(dev_priv, HDCP_REP_CTL,
			       rep_ctl | HDCP_SHA1_TEXT_0);
		ret = intel_write_sha_text(dev_priv, 0);
		if (ret < 0)
			return ret;
		sha_idx += sizeof(sha_text);

		/* Write 16 bits of M0 */
		intel_de_write(dev_priv, HDCP_REP_CTL,
			       rep_ctl | HDCP_SHA1_TEXT_16);
		ret = intel_write_sha_text(dev_priv, 0);
		if (ret < 0)
			return ret;
		sha_idx += sizeof(sha_text);

	} else if (sha_leftovers == 1) {
		/* Write 24 bits of text, 8 bits of M0 */
		intel_de_write(dev_priv, HDCP_REP_CTL,
			       rep_ctl | HDCP_SHA1_TEXT_24);
		sha_text |= bstatus[0] << 16 | bstatus[1] << 8;
		/* Only 24-bits of data, must be in the LSB */
		sha_text = (sha_text & 0xffffff00) >> 8;
		ret = intel_write_sha_text(dev_priv, sha_text);
		if (ret < 0)
			return ret;
		sha_idx += sizeof(sha_text);

		/* Write 32 bits of M0 */
		intel_de_write(dev_priv, HDCP_REP_CTL,
			       rep_ctl | HDCP_SHA1_TEXT_0);
		ret = intel_write_sha_text(dev_priv, 0);
		if (ret < 0)
			return ret;
		sha_idx += sizeof(sha_text);

		/* Write 24 bits of M0 */
		intel_de_write(dev_priv, HDCP_REP_CTL,
			       rep_ctl | HDCP_SHA1_TEXT_8);
		ret = intel_write_sha_text(dev_priv, 0);
		if (ret < 0)
			return ret;
		sha_idx += sizeof(sha_text);

	} else if (sha_leftovers == 2) {
		/* Write 32 bits of text */
		intel_de_write(dev_priv, HDCP_REP_CTL,
			       rep_ctl | HDCP_SHA1_TEXT_32);
		sha_text |= bstatus[0] << 8 | bstatus[1];
		ret = intel_write_sha_text(dev_priv, sha_text);
		if (ret < 0)
			return ret;
		sha_idx += sizeof(sha_text);

		/* Write 64 bits of M0 */
		intel_de_write(dev_priv, HDCP_REP_CTL,
			       rep_ctl | HDCP_SHA1_TEXT_0);
		for (i = 0; i < 2; i++) {
			ret = intel_write_sha_text(dev_priv, 0);
			if (ret < 0)
				return ret;
			sha_idx += sizeof(sha_text);
		}

		/*
		 * Terminate the SHA-1 stream by hand. For the other leftover
		 * cases this is appended by the hardware.
		 */
		intel_de_write(dev_priv, HDCP_REP_CTL,
			       rep_ctl | HDCP_SHA1_TEXT_32);
		sha_text = DRM_HDCP_SHA1_TERMINATOR << 24;
		ret = intel_write_sha_text(dev_priv, sha_text);
		if (ret < 0)
			return ret;
		sha_idx += sizeof(sha_text);
	} else if (sha_leftovers == 3) {
		/* Write 32 bits of text (filled from LSB) */
		intel_de_write(dev_priv, HDCP_REP_CTL,
			       rep_ctl | HDCP_SHA1_TEXT_32);
		sha_text |= bstatus[0];
		ret = intel_write_sha_text(dev_priv, sha_text);
		if (ret < 0)
			return ret;
		sha_idx += sizeof(sha_text);

		/* Write 8 bits of text (filled from LSB), 24 bits of M0 */
		intel_de_write(dev_priv, HDCP_REP_CTL,
			       rep_ctl | HDCP_SHA1_TEXT_8);
		ret = intel_write_sha_text(dev_priv, bstatus[1]);
		if (ret < 0)
			return ret;
		sha_idx += sizeof(sha_text);

		/* Write 32 bits of M0 */
		intel_de_write(dev_priv, HDCP_REP_CTL,
			       rep_ctl | HDCP_SHA1_TEXT_0);
		ret = intel_write_sha_text(dev_priv, 0);
		if (ret < 0)
			return ret;
		sha_idx += sizeof(sha_text);

		/* Write 8 bits of M0 */
		intel_de_write(dev_priv, HDCP_REP_CTL,
			       rep_ctl | HDCP_SHA1_TEXT_24);
		ret = intel_write_sha_text(dev_priv, 0);
		if (ret < 0)
			return ret;
		sha_idx += sizeof(sha_text);
	} else {
		drm_dbg_kms(&dev_priv->drm, "Invalid number of leftovers %d\n",
			    sha_leftovers);
		return -EINVAL;
	}

	intel_de_write(dev_priv, HDCP_REP_CTL, rep_ctl | HDCP_SHA1_TEXT_32);
	/* Fill up to 64-4 bytes with zeros (leave the last write for length) */
	while ((sha_idx % 64) < (64 - sizeof(sha_text))) {
		ret = intel_write_sha_text(dev_priv, 0);
		if (ret < 0)
			return ret;
		sha_idx += sizeof(sha_text);
	}

	/*
	 * Last write gets the length of the concatenation in bits. That is:
	 *  - 5 bytes per device
	 *  - 10 bytes for BINFO/BSTATUS(2), M0(8)
	 */
	sha_text = (num_downstream * 5 + 10) * 8;
	ret = intel_write_sha_text(dev_priv, sha_text);
	if (ret < 0)
		return ret;

	/* Tell the HW we're done with the hash and wait for it to ACK */
	intel_de_write(dev_priv, HDCP_REP_CTL,
		       rep_ctl | HDCP_SHA1_COMPLETE_HASH);
	if (intel_de_wait_for_set(dev_priv, HDCP_REP_CTL,
				  HDCP_SHA1_COMPLETE, 1)) {
		drm_err(&dev_priv->drm, "Timed out waiting for SHA1 complete\n");
		return -ETIMEDOUT;
	}
	if (!(intel_de_read(dev_priv, HDCP_REP_CTL) & HDCP_SHA1_V_MATCH)) {
		drm_dbg_kms(&dev_priv->drm, "SHA-1 mismatch, HDCP failed\n");
		return -ENXIO;
	}

	return 0;
}
 618
/*
 * Implements Part 2 of the HDCP authorization procedure: wait for the
 * repeater's KSV FIFO, validate the downstream topology, check the KSVs
 * against the revocation list, and verify V'.
 */
static
int intel_hdcp_auth_downstream(struct intel_connector *connector)
{
	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
	const struct intel_hdcp_shim *shim = connector->hdcp.shim;
	u8 bstatus[2], num_downstream, *ksv_fifo;
	int ret, i, tries = 3;

	ret = intel_hdcp_poll_ksv_fifo(dig_port, shim);
	if (ret) {
		drm_dbg_kms(&dev_priv->drm,
			    "KSV list failed to become ready (%d)\n", ret);
		return ret;
	}

	ret = shim->read_bstatus(dig_port, bstatus);
	if (ret)
		return ret;

	/* Reject topologies deeper/wider than the HDCP 1.4 limits. */
	if (DRM_HDCP_MAX_DEVICE_EXCEEDED(bstatus[0]) ||
	    DRM_HDCP_MAX_CASCADE_EXCEEDED(bstatus[1])) {
		drm_dbg_kms(&dev_priv->drm, "Max Topology Limit Exceeded\n");
		return -EPERM;
	}

	/*
	 * When repeater reports 0 device count, HDCP1.4 spec allows disabling
	 * the HDCP encryption. That implies that repeater can't have its own
	 * display. As there is no consumption of encrypted content in the
	 * repeater with 0 downstream devices, we are failing the
	 * authentication.
	 */
	num_downstream = DRM_HDCP_NUM_DOWNSTREAM(bstatus[0]);
	if (num_downstream == 0) {
		drm_dbg_kms(&dev_priv->drm,
			    "Repeater with zero downstream devices\n");
		return -EINVAL;
	}

	/* One 5-byte KSV per downstream device. */
	ksv_fifo = kcalloc(DRM_HDCP_KSV_LEN, num_downstream, GFP_KERNEL);
	if (!ksv_fifo) {
		drm_dbg_kms(&dev_priv->drm, "Out of mem: ksv_fifo\n");
		return -ENOMEM;
	}

	ret = shim->read_ksv_fifo(dig_port, num_downstream, ksv_fifo);
	if (ret)
		goto err;

	/* Any revoked KSV in the list fails the whole authentication. */
	if (drm_hdcp_check_ksvs_revoked(&dev_priv->drm, ksv_fifo,
					num_downstream) > 0) {
		drm_err(&dev_priv->drm, "Revoked Ksv(s) in ksv_fifo\n");
		ret = -EPERM;
		goto err;
	}

	/*
	 * When V prime mismatches, DP Spec mandates re-read of
	 * V prime atleast twice.
	 */
	for (i = 0; i < tries; i++) {
		ret = intel_hdcp_validate_v_prime(connector, shim,
						  ksv_fifo, num_downstream,
						  bstatus);
		if (!ret)
			break;
	}

	if (i == tries) {
		drm_dbg_kms(&dev_priv->drm,
			    "V Prime validation failed.(%d)\n", ret);
		goto err;
	}

	drm_dbg_kms(&dev_priv->drm, "HDCP is enabled (%d downstream devices)\n",
		    num_downstream);
	ret = 0;
err:
	kfree(ksv_fifo);
	return ret;
}
 702
/*
 * Implements Part 1 of the HDCP authorization procedure: generate and
 * exchange An/Aksv, read and validate Bksv, enable signalling and
 * encryption, and verify R0/R0'. On a repeater, chains into Part 2.
 */
static int intel_hdcp_auth(struct intel_connector *connector)
{
	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
	struct intel_hdcp *hdcp = &connector->hdcp;
	const struct intel_hdcp_shim *shim = hdcp->shim;
	enum transcoder cpu_transcoder = connector->hdcp.cpu_transcoder;
	enum port port = dig_port->base.port;
	unsigned long r0_prime_gen_start;
	int ret, i, tries = 2;
	/* Unions give byte access to the values read/written as registers. */
	union {
		u32 reg[2];
		u8 shim[DRM_HDCP_AN_LEN];
	} an;
	union {
		u32 reg[2];
		u8 shim[DRM_HDCP_KSV_LEN];
	} bksv;
	union {
		u32 reg;
		u8 shim[DRM_HDCP_RI_LEN];
	} ri;
	bool repeater_present, hdcp_capable;

	/*
	 * Detects whether the display is HDCP capable. Although we check for
	 * valid Bksv below, the HDCP over DP spec requires that we check
	 * whether the display supports HDCP before we write An. For HDMI
	 * displays, this is not necessary.
	 */
	if (shim->hdcp_capable) {
		ret = shim->hdcp_capable(dig_port, &hdcp_capable);
		if (ret)
			return ret;
		if (!hdcp_capable) {
			drm_dbg_kms(&dev_priv->drm,
				    "Panel is not HDCP capable\n");
			return -EINVAL;
		}
	}

	/* Initialize An with 2 random values and acquire it */
	for (i = 0; i < 2; i++)
		intel_de_write(dev_priv,
			       HDCP_ANINIT(dev_priv, cpu_transcoder, port),
			       get_random_u32());
	intel_de_write(dev_priv, HDCP_CONF(dev_priv, cpu_transcoder, port),
		       HDCP_CONF_CAPTURE_AN);

	/* Wait for An to be acquired */
	if (intel_de_wait_for_set(dev_priv,
				  HDCP_STATUS(dev_priv, cpu_transcoder, port),
				  HDCP_STATUS_AN_READY, 1)) {
		drm_err(&dev_priv->drm, "Timed out waiting for An\n");
		return -ETIMEDOUT;
	}

	/* Read back the acquired An and hand it to the sink via the shim. */
	an.reg[0] = intel_de_read(dev_priv,
				  HDCP_ANLO(dev_priv, cpu_transcoder, port));
	an.reg[1] = intel_de_read(dev_priv,
				  HDCP_ANHI(dev_priv, cpu_transcoder, port));
	ret = shim->write_an_aksv(dig_port, an.shim);
	if (ret)
		return ret;

	/* R0' generation timing is measured from the Aksv write. */
	r0_prime_gen_start = jiffies;

	memset(&bksv, 0, sizeof(bksv));

	ret = intel_hdcp_read_valid_bksv(dig_port, shim, bksv.shim);
	if (ret < 0)
		return ret;

	if (drm_hdcp_check_ksvs_revoked(&dev_priv->drm, bksv.shim, 1) > 0) {
		drm_err(&dev_priv->drm, "BKSV is revoked\n");
		return -EPERM;
	}

	intel_de_write(dev_priv, HDCP_BKSVLO(dev_priv, cpu_transcoder, port),
		       bksv.reg[0]);
	intel_de_write(dev_priv, HDCP_BKSVHI(dev_priv, cpu_transcoder, port),
		       bksv.reg[1]);

	ret = shim->repeater_present(dig_port, &repeater_present);
	if (ret)
		return ret;
	if (repeater_present)
		intel_de_write(dev_priv, HDCP_REP_CTL,
			       intel_hdcp_get_repeater_ctl(dev_priv, cpu_transcoder, port));

	ret = shim->toggle_signalling(dig_port, cpu_transcoder, true);
	if (ret)
		return ret;

	intel_de_write(dev_priv, HDCP_CONF(dev_priv, cpu_transcoder, port),
		       HDCP_CONF_AUTH_AND_ENC);

	/* Wait for R0 ready */
	if (wait_for(intel_de_read(dev_priv, HDCP_STATUS(dev_priv, cpu_transcoder, port)) &
		     (HDCP_STATUS_R0_READY | HDCP_STATUS_ENC), 1)) {
		drm_err(&dev_priv->drm, "Timed out waiting for R0 ready\n");
		return -ETIMEDOUT;
	}

	/*
	 * Wait for R0' to become available. The spec says 100ms from Aksv, but
	 * some monitors can take longer than this. We'll set the timeout at
	 * 300ms just to be sure.
	 *
	 * On DP, there's an R0_READY bit available but no such bit
	 * exists on HDMI. Since the upper-bound is the same, we'll just do
	 * the stupid thing instead of polling on one and not the other.
	 */
	wait_remaining_ms_from_jiffies(r0_prime_gen_start, 300);

	tries = 3;

	/*
	 * DP HDCP Spec mandates the two more reattempt to read R0, incase
	 * of R0 mismatch.
	 */
	for (i = 0; i < tries; i++) {
		ri.reg = 0;
		ret = shim->read_ri_prime(dig_port, ri.shim);
		if (ret)
			return ret;
		intel_de_write(dev_priv,
			       HDCP_RPRIME(dev_priv, cpu_transcoder, port),
			       ri.reg);

		/* Wait for Ri prime match */
		if (!wait_for(intel_de_read(dev_priv, HDCP_STATUS(dev_priv, cpu_transcoder, port)) &
			      (HDCP_STATUS_RI_MATCH | HDCP_STATUS_ENC), 1))
			break;
	}

	if (i == tries) {
		drm_dbg_kms(&dev_priv->drm,
			    "Timed out waiting for Ri prime match (%x)\n",
			    intel_de_read(dev_priv, HDCP_STATUS(dev_priv,
					  cpu_transcoder, port)));
		return -ETIMEDOUT;
	}

	/* Wait for encryption confirmation */
	if (intel_de_wait_for_set(dev_priv,
				  HDCP_STATUS(dev_priv, cpu_transcoder, port),
				  HDCP_STATUS_ENC,
				  HDCP_ENCRYPT_STATUS_CHANGE_TIMEOUT_MS)) {
		drm_err(&dev_priv->drm, "Timed out waiting for encryption\n");
		return -ETIMEDOUT;
	}

	/* DP MST Auth Part 1 Step 2.a and Step 2.b */
	if (shim->stream_encryption) {
		ret = shim->stream_encryption(connector, true);
		if (ret) {
			drm_err(&dev_priv->drm, "[%s:%d] Failed to enable HDCP 1.4 stream enc\n",
				connector->base.name, connector->base.base.id);
			return ret;
		}
		drm_dbg_kms(&dev_priv->drm, "HDCP 1.4 transcoder: %s stream encrypted\n",
			    transcoder_name(hdcp->stream_transcoder));
	}

	if (repeater_present)
		return intel_hdcp_auth_downstream(connector);

	drm_dbg_kms(&dev_priv->drm, "HDCP is enabled (no repeater present)\n");
	return 0;
}
 875
/*
 * Disable HDCP 1.4: tear down stream encryption (MST-aware), then port
 * encryption, repeater control bits and signalling.
 */
static int _intel_hdcp_disable(struct intel_connector *connector)
{
	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
	struct intel_hdcp *hdcp = &connector->hdcp;
	enum port port = dig_port->base.port;
	enum transcoder cpu_transcoder = hdcp->cpu_transcoder;
	u32 repeater_ctl;
	int ret;

	drm_dbg_kms(&dev_priv->drm, "[%s:%d] HDCP is being disabled...\n",
		    connector->base.name, connector->base.base.id);

	if (hdcp->shim->stream_encryption) {
		ret = hdcp->shim->stream_encryption(connector, false);
		if (ret) {
			drm_err(&dev_priv->drm, "[%s:%d] Failed to disable HDCP 1.4 stream enc\n",
				connector->base.name, connector->base.base.id);
			return ret;
		}
		drm_dbg_kms(&dev_priv->drm, "HDCP 1.4 transcoder: %s stream encryption disabled\n",
			    transcoder_name(hdcp->stream_transcoder));
		/*
		 * If there are other connectors on this port using HDCP,
		 * don't disable it until it disabled HDCP encryption for
		 * all connectors in MST topology.
		 */
		if (dig_port->num_hdcp_streams > 0)
			return 0;
	}

	/* Last stream on the port: turn off port-level encryption. */
	hdcp->hdcp_encrypted = false;
	intel_de_write(dev_priv, HDCP_CONF(dev_priv, cpu_transcoder, port), 0);
	if (intel_de_wait_for_clear(dev_priv,
				    HDCP_STATUS(dev_priv, cpu_transcoder, port),
				    ~0, HDCP_ENCRYPT_STATUS_CHANGE_TIMEOUT_MS)) {
		drm_err(&dev_priv->drm,
			"Failed to disable HDCP, timeout clearing status\n");
		return -ETIMEDOUT;
	}

	/* Clear only this transcoder/port's repeater bits in HDCP_REP_CTL. */
	repeater_ctl = intel_hdcp_get_repeater_ctl(dev_priv, cpu_transcoder,
						   port);
	intel_de_write(dev_priv, HDCP_REP_CTL,
		       intel_de_read(dev_priv, HDCP_REP_CTL) & ~repeater_ctl);

	ret = hdcp->shim->toggle_signalling(dig_port, cpu_transcoder, false);
	if (ret) {
		drm_err(&dev_priv->drm, "Failed to disable HDCP signalling\n");
		return ret;
	}

	drm_dbg_kms(&dev_priv->drm, "HDCP is disabled\n");
	return 0;
}
 931
/*
 * Enable HDCP 1.4: load the keys (with retries) and then run the
 * authentication procedure, re-attempting on failure as the spec expects.
 */
static int _intel_hdcp_enable(struct intel_connector *connector)
{
	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
	struct intel_hdcp *hdcp = &connector->hdcp;
	int i, ret, tries = 3;

	drm_dbg_kms(&dev_priv->drm, "[%s:%d] HDCP is being enabled...\n",
		    connector->base.name, connector->base.base.id);

	if (!hdcp_key_loadable(dev_priv)) {
		drm_err(&dev_priv->drm, "HDCP key Load is not possible\n");
		return -ENXIO;
	}

	/* Clear and retry the key load on each failed attempt. */
	for (i = 0; i < KEY_LOAD_TRIES; i++) {
		ret = intel_hdcp_load_keys(dev_priv);
		if (!ret)
			break;
		intel_hdcp_clear_keys(dev_priv);
	}
	if (ret) {
		drm_err(&dev_priv->drm, "Could not load HDCP keys, (%d)\n",
			ret);
		return ret;
	}

	/* Incase of authentication failures, HDCP spec expects reauth. */
	for (i = 0; i < tries; i++) {
		ret = intel_hdcp_auth(connector);
		if (!ret) {
			hdcp->hdcp_encrypted = true;
			return 0;
		}

		drm_dbg_kms(&dev_priv->drm, "HDCP Auth failure (%d)\n", ret);

		/* Ensuring HDCP encryption and signalling are stopped. */
		_intel_hdcp_disable(connector);
	}

	drm_dbg_kms(&dev_priv->drm,
		    "HDCP authentication failed (%d tries/%d)\n", tries, ret);
	return ret;
}
 976
/* Map an embedded struct intel_hdcp back to its owning connector. */
static struct intel_connector *intel_hdcp_to_connector(struct intel_hdcp *hdcp)
{
	return container_of(hdcp, struct intel_connector, hdcp);
}
 981
/*
 * Update hdcp->value and keep the per-port count of HDCP-enabled streams
 * (dig_port->num_hdcp_streams) in sync with ENABLED transitions.
 *
 * Caller must hold hdcp->mutex; when the value actually changes, the
 * port's hdcp_mutex must be held too (both are WARNed on below). If
 * @update_property is set, a connector reference is taken and the prop
 * worker is scheduled — the worker drops that reference.
 */
static void intel_hdcp_update_value(struct intel_connector *connector,
				    u64 value, bool update_property)
{
	struct drm_device *dev = connector->base.dev;
	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
	struct intel_hdcp *hdcp = &connector->hdcp;

	drm_WARN_ON(connector->base.dev, !mutex_is_locked(&hdcp->mutex));

	/* No transition — nothing to account for. */
	if (hdcp->value == value)
		return;

	drm_WARN_ON(dev, !mutex_is_locked(&dig_port->hdcp_mutex));

	if (hdcp->value == DRM_MODE_CONTENT_PROTECTION_ENABLED) {
		/* Leaving ENABLED: stream count must have been non-zero. */
		if (!drm_WARN_ON(dev, dig_port->num_hdcp_streams == 0))
			dig_port->num_hdcp_streams--;
	} else if (value == DRM_MODE_CONTENT_PROTECTION_ENABLED) {
		dig_port->num_hdcp_streams++;
	}

	hdcp->value = value;
	if (update_property) {
		/* Reference dropped in intel_hdcp_prop_work(). */
		drm_connector_get(&connector->base);
		schedule_work(&hdcp->prop_work);
	}
}
1009
/*
 * Implements Part 3 of the HDCP authorization procedure: verify that the
 * HDCP 1.4 link is still intact and, if not, disable and re-enable HDCP.
 * On an unrecoverable failure the content protection property is moved
 * back to DESIRED so userspace can react.
 */
static int intel_hdcp_check_link(struct intel_connector *connector)
{
	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
	struct intel_hdcp *hdcp = &connector->hdcp;
	enum port port = dig_port->base.port;
	enum transcoder cpu_transcoder;
	int ret = 0;

	mutex_lock(&hdcp->mutex);
	mutex_lock(&dig_port->hdcp_mutex);

	cpu_transcoder = hdcp->cpu_transcoder;

	/* Check_link valid only when HDCP1.4 is enabled */
	if (hdcp->value != DRM_MODE_CONTENT_PROTECTION_ENABLED ||
	    !hdcp->hdcp_encrypted) {
		ret = -EINVAL;
		goto out;
	}

	/* Hardware says encryption stopped behind our back — give up. */
	if (drm_WARN_ON(&dev_priv->drm,
			!intel_hdcp_in_use(dev_priv, cpu_transcoder, port))) {
		drm_err(&dev_priv->drm,
			"%s:%d HDCP link stopped encryption,%x\n",
			connector->base.name, connector->base.base.id,
			intel_de_read(dev_priv, HDCP_STATUS(dev_priv, cpu_transcoder, port)));
		ret = -ENXIO;
		intel_hdcp_update_value(connector,
					DRM_MODE_CONTENT_PROTECTION_DESIRED,
					true);
		goto out;
	}

	/* Shim reports the link healthy — reassert ENABLED and done. */
	if (hdcp->shim->check_link(dig_port, connector)) {
		if (hdcp->value != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) {
			intel_hdcp_update_value(connector,
				DRM_MODE_CONTENT_PROTECTION_ENABLED, true);
		}
		goto out;
	}

	drm_dbg_kms(&dev_priv->drm,
		    "[%s:%d] HDCP link failed, retrying authentication\n",
		    connector->base.name, connector->base.base.id);

	/* Link failed: full disable + re-enable cycle to re-authenticate. */
	ret = _intel_hdcp_disable(connector);
	if (ret) {
		drm_err(&dev_priv->drm, "Failed to disable hdcp (%d)\n", ret);
		intel_hdcp_update_value(connector,
					DRM_MODE_CONTENT_PROTECTION_DESIRED,
					true);
		goto out;
	}

	ret = _intel_hdcp_enable(connector);
	if (ret) {
		drm_err(&dev_priv->drm, "Failed to enable hdcp (%d)\n", ret);
		intel_hdcp_update_value(connector,
					DRM_MODE_CONTENT_PROTECTION_DESIRED,
					true);
		goto out;
	}

out:
	mutex_unlock(&dig_port->hdcp_mutex);
	mutex_unlock(&hdcp->mutex);
	return ret;
}
1080
/*
 * Deferred worker that pushes hdcp->value out to the connector's
 * content-protection property. Scheduled by intel_hdcp_update_value(),
 * which takes a connector reference that is dropped at the end here.
 */
static void intel_hdcp_prop_work(struct work_struct *work)
{
	struct intel_hdcp *hdcp = container_of(work, struct intel_hdcp,
					       prop_work);
	struct intel_connector *connector = intel_hdcp_to_connector(hdcp);
	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);

	/* Property updates require connection_mutex; take it before hdcp->mutex. */
	drm_modeset_lock(&dev_priv->drm.mode_config.connection_mutex, NULL);
	mutex_lock(&hdcp->mutex);

	/*
	 * This worker is only used to flip between ENABLED/DESIRED. Either of
	 * those to UNDESIRED is handled by core. If value == UNDESIRED,
	 * we're running just after hdcp has been disabled, so just exit
	 */
	if (hdcp->value != DRM_MODE_CONTENT_PROTECTION_UNDESIRED)
		drm_hdcp_update_content_protection(&connector->base,
						   hdcp->value);

	mutex_unlock(&hdcp->mutex);
	drm_modeset_unlock(&dev_priv->drm.mode_config.connection_mutex);

	/* Drop the reference taken when this work was scheduled. */
	drm_connector_put(&connector->base);
}
1105
1106bool is_hdcp_supported(struct drm_i915_private *dev_priv, enum port port)
1107{
1108	return INTEL_INFO(dev_priv)->display.has_hdcp &&
1109			(DISPLAY_VER(dev_priv) >= 12 || port < PORT_E);
1110}
1111
1112static int
1113hdcp2_prepare_ake_init(struct intel_connector *connector,
1114		       struct hdcp2_ake_init *ake_data)
1115{
 
1116	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
1117	struct hdcp_port_data *data = &dig_port->hdcp_port_data;
1118	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
1119	struct i915_hdcp_comp_master *comp;
1120	int ret;
1121
1122	mutex_lock(&dev_priv->hdcp_comp_mutex);
1123	comp = dev_priv->hdcp_master;
1124
1125	if (!comp || !comp->ops) {
1126		mutex_unlock(&dev_priv->hdcp_comp_mutex);
1127		return -EINVAL;
1128	}
1129
1130	ret = comp->ops->initiate_hdcp2_session(comp->mei_dev, data, ake_data);
1131	if (ret)
1132		drm_dbg_kms(&dev_priv->drm, "Prepare_ake_init failed. %d\n",
1133			    ret);
1134	mutex_unlock(&dev_priv->hdcp_comp_mutex);
1135
1136	return ret;
1137}
1138
1139static int
1140hdcp2_verify_rx_cert_prepare_km(struct intel_connector *connector,
1141				struct hdcp2_ake_send_cert *rx_cert,
1142				bool *paired,
1143				struct hdcp2_ake_no_stored_km *ek_pub_km,
1144				size_t *msg_sz)
1145{
 
1146	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
1147	struct hdcp_port_data *data = &dig_port->hdcp_port_data;
1148	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
1149	struct i915_hdcp_comp_master *comp;
1150	int ret;
1151
1152	mutex_lock(&dev_priv->hdcp_comp_mutex);
1153	comp = dev_priv->hdcp_master;
1154
1155	if (!comp || !comp->ops) {
1156		mutex_unlock(&dev_priv->hdcp_comp_mutex);
1157		return -EINVAL;
1158	}
1159
1160	ret = comp->ops->verify_receiver_cert_prepare_km(comp->mei_dev, data,
1161							 rx_cert, paired,
1162							 ek_pub_km, msg_sz);
1163	if (ret < 0)
1164		drm_dbg_kms(&dev_priv->drm, "Verify rx_cert failed. %d\n",
1165			    ret);
1166	mutex_unlock(&dev_priv->hdcp_comp_mutex);
1167
1168	return ret;
1169}
1170
1171static int hdcp2_verify_hprime(struct intel_connector *connector,
1172			       struct hdcp2_ake_send_hprime *rx_hprime)
1173{
 
1174	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
1175	struct hdcp_port_data *data = &dig_port->hdcp_port_data;
1176	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
1177	struct i915_hdcp_comp_master *comp;
1178	int ret;
1179
1180	mutex_lock(&dev_priv->hdcp_comp_mutex);
1181	comp = dev_priv->hdcp_master;
1182
1183	if (!comp || !comp->ops) {
1184		mutex_unlock(&dev_priv->hdcp_comp_mutex);
1185		return -EINVAL;
1186	}
1187
1188	ret = comp->ops->verify_hprime(comp->mei_dev, data, rx_hprime);
1189	if (ret < 0)
1190		drm_dbg_kms(&dev_priv->drm, "Verify hprime failed. %d\n", ret);
1191	mutex_unlock(&dev_priv->hdcp_comp_mutex);
1192
1193	return ret;
1194}
1195
1196static int
1197hdcp2_store_pairing_info(struct intel_connector *connector,
1198			 struct hdcp2_ake_send_pairing_info *pairing_info)
1199{
 
1200	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
1201	struct hdcp_port_data *data = &dig_port->hdcp_port_data;
1202	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
1203	struct i915_hdcp_comp_master *comp;
1204	int ret;
1205
1206	mutex_lock(&dev_priv->hdcp_comp_mutex);
1207	comp = dev_priv->hdcp_master;
1208
1209	if (!comp || !comp->ops) {
1210		mutex_unlock(&dev_priv->hdcp_comp_mutex);
1211		return -EINVAL;
1212	}
1213
1214	ret = comp->ops->store_pairing_info(comp->mei_dev, data, pairing_info);
1215	if (ret < 0)
1216		drm_dbg_kms(&dev_priv->drm, "Store pairing info failed. %d\n",
1217			    ret);
1218	mutex_unlock(&dev_priv->hdcp_comp_mutex);
1219
1220	return ret;
1221}
1222
1223static int
1224hdcp2_prepare_lc_init(struct intel_connector *connector,
1225		      struct hdcp2_lc_init *lc_init)
1226{
 
1227	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
1228	struct hdcp_port_data *data = &dig_port->hdcp_port_data;
1229	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
1230	struct i915_hdcp_comp_master *comp;
1231	int ret;
1232
1233	mutex_lock(&dev_priv->hdcp_comp_mutex);
1234	comp = dev_priv->hdcp_master;
1235
1236	if (!comp || !comp->ops) {
1237		mutex_unlock(&dev_priv->hdcp_comp_mutex);
1238		return -EINVAL;
1239	}
1240
1241	ret = comp->ops->initiate_locality_check(comp->mei_dev, data, lc_init);
1242	if (ret < 0)
1243		drm_dbg_kms(&dev_priv->drm, "Prepare lc_init failed. %d\n",
1244			    ret);
1245	mutex_unlock(&dev_priv->hdcp_comp_mutex);
1246
1247	return ret;
1248}
1249
1250static int
1251hdcp2_verify_lprime(struct intel_connector *connector,
1252		    struct hdcp2_lc_send_lprime *rx_lprime)
1253{
 
1254	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
1255	struct hdcp_port_data *data = &dig_port->hdcp_port_data;
1256	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
1257	struct i915_hdcp_comp_master *comp;
1258	int ret;
1259
1260	mutex_lock(&dev_priv->hdcp_comp_mutex);
1261	comp = dev_priv->hdcp_master;
1262
1263	if (!comp || !comp->ops) {
1264		mutex_unlock(&dev_priv->hdcp_comp_mutex);
1265		return -EINVAL;
1266	}
1267
1268	ret = comp->ops->verify_lprime(comp->mei_dev, data, rx_lprime);
1269	if (ret < 0)
1270		drm_dbg_kms(&dev_priv->drm, "Verify L_Prime failed. %d\n",
1271			    ret);
1272	mutex_unlock(&dev_priv->hdcp_comp_mutex);
1273
1274	return ret;
1275}
1276
1277static int hdcp2_prepare_skey(struct intel_connector *connector,
1278			      struct hdcp2_ske_send_eks *ske_data)
1279{
 
1280	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
1281	struct hdcp_port_data *data = &dig_port->hdcp_port_data;
1282	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
1283	struct i915_hdcp_comp_master *comp;
1284	int ret;
1285
1286	mutex_lock(&dev_priv->hdcp_comp_mutex);
1287	comp = dev_priv->hdcp_master;
1288
1289	if (!comp || !comp->ops) {
1290		mutex_unlock(&dev_priv->hdcp_comp_mutex);
1291		return -EINVAL;
1292	}
1293
1294	ret = comp->ops->get_session_key(comp->mei_dev, data, ske_data);
1295	if (ret < 0)
1296		drm_dbg_kms(&dev_priv->drm, "Get session key failed. %d\n",
1297			    ret);
1298	mutex_unlock(&dev_priv->hdcp_comp_mutex);
1299
1300	return ret;
1301}
1302
1303static int
1304hdcp2_verify_rep_topology_prepare_ack(struct intel_connector *connector,
1305				      struct hdcp2_rep_send_receiverid_list
1306								*rep_topology,
1307				      struct hdcp2_rep_send_ack *rep_send_ack)
1308{
 
1309	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
1310	struct hdcp_port_data *data = &dig_port->hdcp_port_data;
1311	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
1312	struct i915_hdcp_comp_master *comp;
1313	int ret;
1314
1315	mutex_lock(&dev_priv->hdcp_comp_mutex);
1316	comp = dev_priv->hdcp_master;
1317
1318	if (!comp || !comp->ops) {
1319		mutex_unlock(&dev_priv->hdcp_comp_mutex);
1320		return -EINVAL;
1321	}
1322
1323	ret = comp->ops->repeater_check_flow_prepare_ack(comp->mei_dev, data,
1324							 rep_topology,
1325							 rep_send_ack);
 
1326	if (ret < 0)
1327		drm_dbg_kms(&dev_priv->drm,
1328			    "Verify rep topology failed. %d\n", ret);
1329	mutex_unlock(&dev_priv->hdcp_comp_mutex);
1330
1331	return ret;
1332}
1333
1334static int
1335hdcp2_verify_mprime(struct intel_connector *connector,
1336		    struct hdcp2_rep_stream_ready *stream_ready)
1337{
 
1338	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
1339	struct hdcp_port_data *data = &dig_port->hdcp_port_data;
1340	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
1341	struct i915_hdcp_comp_master *comp;
1342	int ret;
1343
1344	mutex_lock(&dev_priv->hdcp_comp_mutex);
1345	comp = dev_priv->hdcp_master;
1346
1347	if (!comp || !comp->ops) {
1348		mutex_unlock(&dev_priv->hdcp_comp_mutex);
1349		return -EINVAL;
1350	}
1351
1352	ret = comp->ops->verify_mprime(comp->mei_dev, data, stream_ready);
1353	if (ret < 0)
1354		drm_dbg_kms(&dev_priv->drm, "Verify mprime failed. %d\n", ret);
1355	mutex_unlock(&dev_priv->hdcp_comp_mutex);
1356
1357	return ret;
1358}
1359
1360static int hdcp2_authenticate_port(struct intel_connector *connector)
1361{
 
1362	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
1363	struct hdcp_port_data *data = &dig_port->hdcp_port_data;
1364	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
1365	struct i915_hdcp_comp_master *comp;
1366	int ret;
1367
1368	mutex_lock(&dev_priv->hdcp_comp_mutex);
1369	comp = dev_priv->hdcp_master;
1370
1371	if (!comp || !comp->ops) {
1372		mutex_unlock(&dev_priv->hdcp_comp_mutex);
1373		return -EINVAL;
1374	}
1375
1376	ret = comp->ops->enable_hdcp_authentication(comp->mei_dev, data);
1377	if (ret < 0)
1378		drm_dbg_kms(&dev_priv->drm, "Enable hdcp auth failed. %d\n",
1379			    ret);
1380	mutex_unlock(&dev_priv->hdcp_comp_mutex);
1381
1382	return ret;
1383}
1384
1385static int hdcp2_close_mei_session(struct intel_connector *connector)
1386{
 
1387	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
1388	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
1389	struct i915_hdcp_comp_master *comp;
1390	int ret;
1391
1392	mutex_lock(&dev_priv->hdcp_comp_mutex);
1393	comp = dev_priv->hdcp_master;
1394
1395	if (!comp || !comp->ops) {
1396		mutex_unlock(&dev_priv->hdcp_comp_mutex);
1397		return -EINVAL;
1398	}
1399
1400	ret = comp->ops->close_hdcp_session(comp->mei_dev,
1401					     &dig_port->hdcp_port_data);
1402	mutex_unlock(&dev_priv->hdcp_comp_mutex);
1403
1404	return ret;
1405}
1406
/* Deauthenticate the port by tearing down the firmware HDCP session. */
static int hdcp2_deauthenticate_port(struct intel_connector *connector)
{
	return hdcp2_close_mei_session(connector);
}
1411
/*
 * Authentication flow starts from here.
 *
 * AKE (Authentication and Key Exchange) step of HDCP2.2:
 * AKE_Init -> AKE_Send_Cert -> (No_Stored_km or Stored_km) ->
 * AKE_Send_H_prime [-> AKE_Send_Pairing_Info if not yet paired].
 * Returns 0 on success, negative error code otherwise.
 */
static int hdcp2_authentication_key_exchange(struct intel_connector *connector)
{
	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
	struct intel_hdcp *hdcp = &connector->hdcp;
	/* The AKE messages are used one at a time, so share storage. */
	union {
		struct hdcp2_ake_init ake_init;
		struct hdcp2_ake_send_cert send_cert;
		struct hdcp2_ake_no_stored_km no_stored_km;
		struct hdcp2_ake_send_hprime send_hprime;
		struct hdcp2_ake_send_pairing_info pairing_info;
	} msgs;
	const struct intel_hdcp_shim *shim = hdcp->shim;
	size_t size;
	int ret;

	/* Init for seq_num */
	hdcp->seq_num_v = 0;
	hdcp->seq_num_m = 0;

	ret = hdcp2_prepare_ake_init(connector, &msgs.ake_init);
	if (ret < 0)
		return ret;

	ret = shim->write_2_2_msg(dig_port, &msgs.ake_init,
				  sizeof(msgs.ake_init));
	if (ret < 0)
		return ret;

	ret = shim->read_2_2_msg(dig_port, HDCP_2_2_AKE_SEND_CERT,
				 &msgs.send_cert, sizeof(msgs.send_cert));
	if (ret < 0)
		return ret;

	if (msgs.send_cert.rx_caps[0] != HDCP_2_2_RX_CAPS_VERSION_VAL) {
		drm_dbg_kms(&dev_priv->drm, "cert.rx_caps dont claim HDCP2.2\n");
		return -EINVAL;
	}

	hdcp->is_repeater = HDCP_2_2_RX_REPEATER(msgs.send_cert.rx_caps[2]);

	/* Reject receivers on the revocation list (SRM). */
	if (drm_hdcp_check_ksvs_revoked(&dev_priv->drm,
					msgs.send_cert.cert_rx.receiver_id,
					1) > 0) {
		drm_err(&dev_priv->drm, "Receiver ID is revoked\n");
		return -EPERM;
	}

	/*
	 * Here msgs.no_stored_km will hold msgs corresponding to the km
	 * stored also.
	 */
	ret = hdcp2_verify_rx_cert_prepare_km(connector, &msgs.send_cert,
					      &hdcp->is_paired,
					      &msgs.no_stored_km, &size);
	if (ret < 0)
		return ret;

	ret = shim->write_2_2_msg(dig_port, &msgs.no_stored_km, size);
	if (ret < 0)
		return ret;

	ret = shim->read_2_2_msg(dig_port, HDCP_2_2_AKE_SEND_HPRIME,
				 &msgs.send_hprime, sizeof(msgs.send_hprime));
	if (ret < 0)
		return ret;

	ret = hdcp2_verify_hprime(connector, &msgs.send_hprime);
	if (ret < 0)
		return ret;

	if (!hdcp->is_paired) {
		/* Pairing is required */
		ret = shim->read_2_2_msg(dig_port,
					 HDCP_2_2_AKE_SEND_PAIRING_INFO,
					 &msgs.pairing_info,
					 sizeof(msgs.pairing_info));
		if (ret < 0)
			return ret;

		ret = hdcp2_store_pairing_info(connector, &msgs.pairing_info);
		if (ret < 0)
			return ret;
		hdcp->is_paired = true;
	}

	return 0;
}
1501
/*
 * Locality check (LC) step of HDCP2.2: send LC_Init, read LC_Send_L_prime
 * and verify L'. Retried up to HDCP2_LC_RETRY_CNT times; the last error is
 * returned if all attempts fail.
 */
static int hdcp2_locality_check(struct intel_connector *connector)
{
	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
	struct intel_hdcp *hdcp = &connector->hdcp;
	union {
		struct hdcp2_lc_init lc_init;
		struct hdcp2_lc_send_lprime send_lprime;
	} msgs;
	const struct intel_hdcp_shim *shim = hdcp->shim;
	int tries = HDCP2_LC_RETRY_CNT, ret, i;

	for (i = 0; i < tries; i++) {
		ret = hdcp2_prepare_lc_init(connector, &msgs.lc_init);
		if (ret < 0)
			continue;

		ret = shim->write_2_2_msg(dig_port, &msgs.lc_init,
				      sizeof(msgs.lc_init));
		if (ret < 0)
			continue;

		ret = shim->read_2_2_msg(dig_port,
					 HDCP_2_2_LC_SEND_LPRIME,
					 &msgs.send_lprime,
					 sizeof(msgs.send_lprime));
		if (ret < 0)
			continue;

		ret = hdcp2_verify_lprime(connector, &msgs.send_lprime);
		if (!ret)
			break;
	}

	return ret;
}
1537
1538static int hdcp2_session_key_exchange(struct intel_connector *connector)
1539{
1540	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
1541	struct intel_hdcp *hdcp = &connector->hdcp;
1542	struct hdcp2_ske_send_eks send_eks;
1543	int ret;
1544
1545	ret = hdcp2_prepare_skey(connector, &send_eks);
1546	if (ret < 0)
1547		return ret;
1548
1549	ret = hdcp->shim->write_2_2_msg(dig_port, &send_eks,
1550					sizeof(send_eks));
1551	if (ret < 0)
1552		return ret;
1553
1554	return 0;
1555}
1556
1557static
1558int _hdcp2_propagate_stream_management_info(struct intel_connector *connector)
1559{
1560	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
1561	struct hdcp_port_data *data = &dig_port->hdcp_port_data;
1562	struct intel_hdcp *hdcp = &connector->hdcp;
1563	union {
1564		struct hdcp2_rep_stream_manage stream_manage;
1565		struct hdcp2_rep_stream_ready stream_ready;
1566	} msgs;
1567	const struct intel_hdcp_shim *shim = hdcp->shim;
1568	int ret, streams_size_delta, i;
1569
1570	if (connector->hdcp.seq_num_m > HDCP_2_2_SEQ_NUM_MAX)
1571		return -ERANGE;
1572
1573	/* Prepare RepeaterAuth_Stream_Manage msg */
1574	msgs.stream_manage.msg_id = HDCP_2_2_REP_STREAM_MANAGE;
1575	drm_hdcp_cpu_to_be24(msgs.stream_manage.seq_num_m, hdcp->seq_num_m);
1576
1577	msgs.stream_manage.k = cpu_to_be16(data->k);
1578
1579	for (i = 0; i < data->k; i++) {
1580		msgs.stream_manage.streams[i].stream_id = data->streams[i].stream_id;
1581		msgs.stream_manage.streams[i].stream_type = data->streams[i].stream_type;
1582	}
1583
1584	streams_size_delta = (HDCP_2_2_MAX_CONTENT_STREAMS_CNT - data->k) *
1585				sizeof(struct hdcp2_streamid_type);
1586	/* Send it to Repeater */
1587	ret = shim->write_2_2_msg(dig_port, &msgs.stream_manage,
1588				  sizeof(msgs.stream_manage) - streams_size_delta);
1589	if (ret < 0)
1590		goto out;
1591
1592	ret = shim->read_2_2_msg(dig_port, HDCP_2_2_REP_STREAM_READY,
1593				 &msgs.stream_ready, sizeof(msgs.stream_ready));
1594	if (ret < 0)
1595		goto out;
1596
1597	data->seq_num_m = hdcp->seq_num_m;
1598
1599	ret = hdcp2_verify_mprime(connector, &msgs.stream_ready);
1600
1601out:
1602	hdcp->seq_num_m++;
1603
1604	return ret;
1605}
1606
/*
 * Repeater authentication: read the repeater's ReceiverID_List, validate
 * topology limits, seq_num_v ordering and revocation status, then have
 * the firmware verify V' and send the resulting ack back to the repeater.
 */
static
int hdcp2_authenticate_repeater_topology(struct intel_connector *connector)
{
	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
	struct intel_hdcp *hdcp = &connector->hdcp;
	union {
		struct hdcp2_rep_send_receiverid_list recvid_list;
		struct hdcp2_rep_send_ack rep_ack;
	} msgs;
	const struct intel_hdcp_shim *shim = hdcp->shim;
	u32 seq_num_v, device_cnt;
	u8 *rx_info;
	int ret;

	ret = shim->read_2_2_msg(dig_port, HDCP_2_2_REP_SEND_RECVID_LIST,
				 &msgs.recvid_list, sizeof(msgs.recvid_list));
	if (ret < 0)
		return ret;

	rx_info = msgs.recvid_list.rx_info;

	if (HDCP_2_2_MAX_CASCADE_EXCEEDED(rx_info[1]) ||
	    HDCP_2_2_MAX_DEVS_EXCEEDED(rx_info[1])) {
		drm_dbg_kms(&dev_priv->drm, "Topology Max Size Exceeded\n");
		return -EINVAL;
	}

	/* Converting and Storing the seq_num_v to local variable as DWORD */
	seq_num_v =
		drm_hdcp_be24_to_cpu((const u8 *)msgs.recvid_list.seq_num_v);

	/* The very first ReceiverID_List (pre-encryption) must carry seq_num_v == 0. */
	if (!hdcp->hdcp2_encrypted && seq_num_v) {
		drm_dbg_kms(&dev_priv->drm,
			    "Non zero Seq_num_v at first RecvId_List msg\n");
		return -EINVAL;
	}

	if (seq_num_v < hdcp->seq_num_v) {
		/* Roll over of the seq_num_v from repeater. Reauthenticate. */
		drm_dbg_kms(&dev_priv->drm, "Seq_num_v roll over.\n");
		return -EINVAL;
	}

	/* Device count is split across the two rx_info bytes. */
	device_cnt = (HDCP_2_2_DEV_COUNT_HI(rx_info[0]) << 4 |
		      HDCP_2_2_DEV_COUNT_LO(rx_info[1]));
	if (drm_hdcp_check_ksvs_revoked(&dev_priv->drm,
					msgs.recvid_list.receiver_ids,
					device_cnt) > 0) {
		drm_err(&dev_priv->drm, "Revoked receiver ID(s) is in list\n");
		return -EPERM;
	}

	ret = hdcp2_verify_rep_topology_prepare_ack(connector,
						    &msgs.recvid_list,
						    &msgs.rep_ack);
	if (ret < 0)
		return ret;

	hdcp->seq_num_v = seq_num_v;
	ret = shim->write_2_2_msg(dig_port, &msgs.rep_ack,
				  sizeof(msgs.rep_ack));
	if (ret < 0)
		return ret;

	return 0;
}
1674
/*
 * Run the full HDCP2.2 sink authentication sequence: AKE, locality check,
 * SKE, optional stream-type configuration, and — for repeaters — topology
 * authentication. Returns 0 on success, negative error otherwise.
 */
static int hdcp2_authenticate_sink(struct intel_connector *connector)
{
	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
	struct drm_i915_private *i915 = to_i915(connector->base.dev);
	struct intel_hdcp *hdcp = &connector->hdcp;
	const struct intel_hdcp_shim *shim = hdcp->shim;
	int ret;

	ret = hdcp2_authentication_key_exchange(connector);
	if (ret < 0) {
		drm_dbg_kms(&i915->drm, "AKE Failed. Err : %d\n", ret);
		return ret;
	}

	ret = hdcp2_locality_check(connector);
	if (ret < 0) {
		drm_dbg_kms(&i915->drm,
			    "Locality Check failed. Err : %d\n", ret);
		return ret;
	}

	ret = hdcp2_session_key_exchange(connector);
	if (ret < 0) {
		drm_dbg_kms(&i915->drm, "SKE Failed. Err : %d\n", ret);
		return ret;
	}

	/* Optional shim hook to program the stream/content type on the sink. */
	if (shim->config_stream_type) {
		ret = shim->config_stream_type(dig_port,
					       hdcp->is_repeater,
					       hdcp->content_type);
		if (ret < 0)
			return ret;
	}

	if (hdcp->is_repeater) {
		ret = hdcp2_authenticate_repeater_topology(connector);
		if (ret < 0) {
			drm_dbg_kms(&i915->drm,
				    "Repeater Auth Failed. Err: %d\n", ret);
			return ret;
		}
	}

	return ret;
}
1721
/*
 * Enable HDCP2.2 stream encryption on this connector's stream, after
 * confirming the link itself reports LINK_ENCRYPTION_STATUS. On a link
 * that is not encrypted, tear the port authentication down (link_recover)
 * and return -EPERM.
 */
static int hdcp2_enable_stream_encryption(struct intel_connector *connector)
{
	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
	struct hdcp_port_data *data = &dig_port->hdcp_port_data;
	struct intel_hdcp *hdcp = &connector->hdcp;
	enum transcoder cpu_transcoder = hdcp->cpu_transcoder;
	enum port port = dig_port->base.port;
	int ret = 0;

	/* Stream encryption is meaningless unless the link is encrypted. */
	if (!(intel_de_read(dev_priv, HDCP2_STATUS(dev_priv, cpu_transcoder, port)) &
			    LINK_ENCRYPTION_STATUS)) {
		drm_err(&dev_priv->drm, "[%s:%d] HDCP 2.2 Link is not encrypted\n",
			connector->base.name, connector->base.base.id);
		ret = -EPERM;
		goto link_recover;
	}

	if (hdcp->shim->stream_2_2_encryption) {
		ret = hdcp->shim->stream_2_2_encryption(connector, true);
		if (ret) {
			drm_err(&dev_priv->drm, "[%s:%d] Failed to enable HDCP 2.2 stream enc\n",
				connector->base.name, connector->base.base.id);
			return ret;
		}
		drm_dbg_kms(&dev_priv->drm, "HDCP 2.2 transcoder: %s stream encrypted\n",
			    transcoder_name(hdcp->stream_transcoder));
	}

	return 0;

link_recover:
	if (hdcp2_deauthenticate_port(connector) < 0)
		drm_dbg_kms(&dev_priv->drm, "Port deauth failed.\n");

	/* Invalidate port auth state; stream count is reset as well. */
	dig_port->hdcp_auth_status = false;
	data->k = 0;

	return ret;
}
1762
/*
 * Enable HDCP2.2 link encryption: turn on signalling (if the shim needs
 * it), request encryption once the link reports LINK_AUTH_STATUS, and
 * wait for LINK_ENCRYPTION_STATUS to assert.
 */
static int hdcp2_enable_encryption(struct intel_connector *connector)
{
	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
	struct intel_hdcp *hdcp = &connector->hdcp;
	enum port port = dig_port->base.port;
	enum transcoder cpu_transcoder = hdcp->cpu_transcoder;
	int ret;

	/* Encryption must not already be live when we get here. */
	drm_WARN_ON(&dev_priv->drm,
		    intel_de_read(dev_priv, HDCP2_STATUS(dev_priv, cpu_transcoder, port)) &
		    LINK_ENCRYPTION_STATUS);
	if (hdcp->shim->toggle_signalling) {
		ret = hdcp->shim->toggle_signalling(dig_port, cpu_transcoder,
						    true);
		if (ret) {
			drm_err(&dev_priv->drm,
				"Failed to enable HDCP signalling. %d\n",
				ret);
			return ret;
		}
	}

	if (intel_de_read(dev_priv, HDCP2_STATUS(dev_priv, cpu_transcoder, port)) &
	    LINK_AUTH_STATUS) {
		/* Link is Authenticated. Now set for Encryption */
		intel_de_write(dev_priv,
			       HDCP2_CTL(dev_priv, cpu_transcoder, port),
			       intel_de_read(dev_priv, HDCP2_CTL(dev_priv, cpu_transcoder, port)) | CTL_LINK_ENCRYPTION_REQ);
	}

	ret = intel_de_wait_for_set(dev_priv,
				    HDCP2_STATUS(dev_priv, cpu_transcoder,
						 port),
				    LINK_ENCRYPTION_STATUS,
				    HDCP_ENCRYPT_STATUS_CHANGE_TIMEOUT_MS);
	dig_port->hdcp_auth_status = true;

	return ret;
}
1803
/*
 * Disable HDCP2.2 link encryption: drop the encryption request bit, wait
 * for LINK_ENCRYPTION_STATUS to clear (logging on timeout), then turn
 * off signalling if the shim requires it.
 */
static int hdcp2_disable_encryption(struct intel_connector *connector)
{
	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
	struct intel_hdcp *hdcp = &connector->hdcp;
	enum port port = dig_port->base.port;
	enum transcoder cpu_transcoder = hdcp->cpu_transcoder;
	int ret;

	/* Encryption is expected to be live when we get here. */
	drm_WARN_ON(&dev_priv->drm, !(intel_de_read(dev_priv, HDCP2_STATUS(dev_priv, cpu_transcoder, port)) &
				      LINK_ENCRYPTION_STATUS));

	intel_de_write(dev_priv, HDCP2_CTL(dev_priv, cpu_transcoder, port),
		       intel_de_read(dev_priv, HDCP2_CTL(dev_priv, cpu_transcoder, port)) & ~CTL_LINK_ENCRYPTION_REQ);

	ret = intel_de_wait_for_clear(dev_priv,
				      HDCP2_STATUS(dev_priv, cpu_transcoder,
						   port),
				      LINK_ENCRYPTION_STATUS,
				      HDCP_ENCRYPT_STATUS_CHANGE_TIMEOUT_MS);
	if (ret == -ETIMEDOUT)
		drm_dbg_kms(&dev_priv->drm, "Disable Encryption Timedout");

	if (hdcp->shim->toggle_signalling) {
		ret = hdcp->shim->toggle_signalling(dig_port, cpu_transcoder,
						    false);
		if (ret) {
			drm_err(&dev_priv->drm,
				"Failed to disable HDCP signalling. %d\n",
				ret);
			return ret;
		}
	}

	return ret;
}
1840
/*
 * Retry wrapper around _hdcp2_propagate_stream_management_info(): up to 3
 * attempts, bailing out early on a seq_num_m roll over (which requires a
 * full re-authentication by the caller). No-op for non-repeaters.
 */
static int
hdcp2_propagate_stream_management_info(struct intel_connector *connector)
{
	struct drm_i915_private *i915 = to_i915(connector->base.dev);
	int i, tries = 3, ret;

	if (!connector->hdcp.is_repeater)
		return 0;

	for (i = 0; i < tries; i++) {
		ret = _hdcp2_propagate_stream_management_info(connector);
		if (!ret)
			break;

		/* Lets restart the auth incase of seq_num_m roll over */
		if (connector->hdcp.seq_num_m > HDCP_2_2_SEQ_NUM_MAX) {
			drm_dbg_kms(&i915->drm,
				    "seq_num_m roll over.(%d)\n", ret);
			break;
		}

		drm_dbg_kms(&i915->drm,
			    "HDCP2 stream management %d of %d Failed.(%d)\n",
			    i + 1, tries, ret);
	}

	return ret;
}
1869
/*
 * Authenticate the sink (up to 3 tries, deauthenticating the firmware
 * session between attempts), then enable link encryption and finally
 * stream encryption. If the port is already authenticated
 * (dig_port->hdcp_auth_status), the auth/encrypt steps are skipped and
 * only stream encryption is enabled.
 */
static int hdcp2_authenticate_and_encrypt(struct intel_connector *connector)
{
	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
	struct drm_i915_private *i915 = to_i915(connector->base.dev);
	int ret = 0, i, tries = 3;

	for (i = 0; i < tries && !dig_port->hdcp_auth_status; i++) {
		ret = hdcp2_authenticate_sink(connector);
		if (!ret) {
			ret = hdcp2_propagate_stream_management_info(connector);
			if (ret) {
				drm_dbg_kms(&i915->drm,
					    "Stream management failed.(%d)\n",
					    ret);
				break;
			}

			ret = hdcp2_authenticate_port(connector);
			if (!ret)
				break;
			drm_dbg_kms(&i915->drm, "HDCP2 port auth failed.(%d)\n",
				    ret);
		}

		/* Clearing the mei hdcp session */
		drm_dbg_kms(&i915->drm, "HDCP2.2 Auth %d of %d Failed.(%d)\n",
			    i + 1, tries, ret);
		if (hdcp2_deauthenticate_port(connector) < 0)
			drm_dbg_kms(&i915->drm, "Port deauth failed.\n");
	}

	if (!ret && !dig_port->hdcp_auth_status) {
		/*
		 * Ensuring the required 200mSec min time interval between
		 * Session Key Exchange and encryption.
		 */
		msleep(HDCP_2_2_DELAY_BEFORE_ENCRYPTION_EN);
		ret = hdcp2_enable_encryption(connector);
		if (ret < 0) {
			drm_dbg_kms(&i915->drm,
				    "Encryption Enable Failed.(%d)\n", ret);
			if (hdcp2_deauthenticate_port(connector) < 0)
				drm_dbg_kms(&i915->drm, "Port deauth failed.\n");
		}
	}

	if (!ret)
		ret = hdcp2_enable_stream_encryption(connector);

	return ret;
}
1921
/*
 * Enable HDCP2.2 on @connector. For non-MST encoders a single stream with
 * the connector's content type is registered; for MST the required
 * content streams of the whole port are collected first. On success sets
 * hdcp->hdcp2_encrypted.
 */
static int _intel_hdcp2_enable(struct intel_connector *connector)
{
	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
	struct drm_i915_private *i915 = to_i915(connector->base.dev);
	struct hdcp_port_data *data = &dig_port->hdcp_port_data;
	struct intel_hdcp *hdcp = &connector->hdcp;
	int ret;

	drm_dbg_kms(&i915->drm, "[%s:%d] HDCP2.2 is being enabled. Type: %d\n",
		    connector->base.name, connector->base.base.id,
		    hdcp->content_type);

	/* Stream which requires encryption */
	if (!intel_encoder_is_mst(intel_attached_encoder(connector))) {
		data->k = 1;
		data->streams[0].stream_type = hdcp->content_type;
	} else {
		ret = intel_hdcp_required_content_stream(dig_port);
		if (ret)
			return ret;
	}

	ret = hdcp2_authenticate_and_encrypt(connector);
	if (ret) {
		drm_dbg_kms(&i915->drm, "HDCP2 Type%d  Enabling Failed. (%d)\n",
			    hdcp->content_type, ret);
		return ret;
	}

	drm_dbg_kms(&i915->drm, "[%s:%d] HDCP2.2 is enabled. Type %d\n",
		    connector->base.name, connector->base.base.id,
		    hdcp->content_type);

	hdcp->hdcp2_encrypted = true;
	return 0;
}
1958
1959static int
1960_intel_hdcp2_disable(struct intel_connector *connector, bool hdcp2_link_recovery)
1961{
 
1962	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
1963	struct drm_i915_private *i915 = to_i915(connector->base.dev);
1964	struct hdcp_port_data *data = &dig_port->hdcp_port_data;
1965	struct intel_hdcp *hdcp = &connector->hdcp;
1966	int ret;
1967
1968	drm_dbg_kms(&i915->drm, "[%s:%d] HDCP2.2 is being Disabled\n",
1969		    connector->base.name, connector->base.base.id);
1970
1971	if (hdcp->shim->stream_2_2_encryption) {
1972		ret = hdcp->shim->stream_2_2_encryption(connector, false);
1973		if (ret) {
1974			drm_err(&i915->drm, "[%s:%d] Failed to disable HDCP 2.2 stream enc\n",
1975				connector->base.name, connector->base.base.id);
1976			return ret;
1977		}
1978		drm_dbg_kms(&i915->drm, "HDCP 2.2 transcoder: %s stream encryption disabled\n",
1979			    transcoder_name(hdcp->stream_transcoder));
1980
1981		if (dig_port->num_hdcp_streams > 0 && !hdcp2_link_recovery)
1982			return 0;
1983	}
1984
1985	ret = hdcp2_disable_encryption(connector);
1986
1987	if (hdcp2_deauthenticate_port(connector) < 0)
1988		drm_dbg_kms(&i915->drm, "Port deauth failed.\n");
1989
1990	connector->hdcp.hdcp2_encrypted = false;
1991	dig_port->hdcp_auth_status = false;
1992	data->k = 0;
1993
1994	return ret;
1995}
1996
1997/* Implements the Link Integrity Check for HDCP2.2 */
1998static int intel_hdcp2_check_link(struct intel_connector *connector)
1999{
 
2000	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
2001	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
2002	struct intel_hdcp *hdcp = &connector->hdcp;
2003	enum port port = dig_port->base.port;
2004	enum transcoder cpu_transcoder;
2005	int ret = 0;
2006
2007	mutex_lock(&hdcp->mutex);
2008	mutex_lock(&dig_port->hdcp_mutex);
2009	cpu_transcoder = hdcp->cpu_transcoder;
2010
2011	/* hdcp2_check_link is expected only when HDCP2.2 is Enabled */
2012	if (hdcp->value != DRM_MODE_CONTENT_PROTECTION_ENABLED ||
2013	    !hdcp->hdcp2_encrypted) {
2014		ret = -EINVAL;
2015		goto out;
2016	}
2017
2018	if (drm_WARN_ON(&dev_priv->drm,
2019			!intel_hdcp2_in_use(dev_priv, cpu_transcoder, port))) {
2020		drm_err(&dev_priv->drm,
2021			"HDCP2.2 link stopped the encryption, %x\n",
2022			intel_de_read(dev_priv, HDCP2_STATUS(dev_priv, cpu_transcoder, port)));
2023		ret = -ENXIO;
2024		_intel_hdcp2_disable(connector, true);
2025		intel_hdcp_update_value(connector,
2026					DRM_MODE_CONTENT_PROTECTION_DESIRED,
2027					true);
2028		goto out;
2029	}
2030
2031	ret = hdcp->shim->check_2_2_link(dig_port, connector);
2032	if (ret == HDCP_LINK_PROTECTED) {
2033		if (hdcp->value != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) {
2034			intel_hdcp_update_value(connector,
2035					DRM_MODE_CONTENT_PROTECTION_ENABLED,
2036					true);
2037		}
2038		goto out;
2039	}
2040
2041	if (ret == HDCP_TOPOLOGY_CHANGE) {
2042		if (hdcp->value == DRM_MODE_CONTENT_PROTECTION_UNDESIRED)
2043			goto out;
2044
2045		drm_dbg_kms(&dev_priv->drm,
2046			    "HDCP2.2 Downstream topology change\n");
 
2047		ret = hdcp2_authenticate_repeater_topology(connector);
2048		if (!ret) {
2049			intel_hdcp_update_value(connector,
2050					DRM_MODE_CONTENT_PROTECTION_ENABLED,
2051					true);
2052			goto out;
2053		}
2054		drm_dbg_kms(&dev_priv->drm,
2055			    "[%s:%d] Repeater topology auth failed.(%d)\n",
2056			    connector->base.name, connector->base.base.id,
 
2057			    ret);
2058	} else {
2059		drm_dbg_kms(&dev_priv->drm,
2060			    "[%s:%d] HDCP2.2 link failed, retrying auth\n",
2061			    connector->base.name, connector->base.base.id);
2062	}
2063
2064	ret = _intel_hdcp2_disable(connector, true);
2065	if (ret) {
2066		drm_err(&dev_priv->drm,
2067			"[%s:%d] Failed to disable hdcp2.2 (%d)\n",
2068			connector->base.name, connector->base.base.id, ret);
2069		intel_hdcp_update_value(connector,
2070				DRM_MODE_CONTENT_PROTECTION_DESIRED, true);
2071		goto out;
2072	}
2073
2074	ret = _intel_hdcp2_enable(connector);
2075	if (ret) {
2076		drm_dbg_kms(&dev_priv->drm,
2077			    "[%s:%d] Failed to enable hdcp2.2 (%d)\n",
2078			    connector->base.name, connector->base.base.id,
2079			    ret);
2080		intel_hdcp_update_value(connector,
2081					DRM_MODE_CONTENT_PROTECTION_DESIRED,
2082					true);
2083		goto out;
2084	}
2085
2086out:
2087	mutex_unlock(&dig_port->hdcp_mutex);
2088	mutex_unlock(&hdcp->mutex);
2089	return ret;
2090}
2091
2092static void intel_hdcp_check_work(struct work_struct *work)
2093{
2094	struct intel_hdcp *hdcp = container_of(to_delayed_work(work),
2095					       struct intel_hdcp,
2096					       check_work);
2097	struct intel_connector *connector = intel_hdcp_to_connector(hdcp);
 
 
2098
2099	if (drm_connector_is_unregistered(&connector->base))
2100		return;
2101
2102	if (!intel_hdcp2_check_link(connector))
2103		schedule_delayed_work(&hdcp->check_work,
2104				      DRM_HDCP2_CHECK_PERIOD_MS);
2105	else if (!intel_hdcp_check_link(connector))
2106		schedule_delayed_work(&hdcp->check_work,
2107				      DRM_HDCP_CHECK_PERIOD_MS);
2108}
2109
2110static int i915_hdcp_component_bind(struct device *i915_kdev,
2111				    struct device *mei_kdev, void *data)
2112{
2113	struct drm_i915_private *dev_priv = kdev_to_i915(i915_kdev);
2114
2115	drm_dbg(&dev_priv->drm, "I915 HDCP comp bind\n");
2116	mutex_lock(&dev_priv->hdcp_comp_mutex);
2117	dev_priv->hdcp_master = (struct i915_hdcp_comp_master *)data;
2118	dev_priv->hdcp_master->mei_dev = mei_kdev;
2119	mutex_unlock(&dev_priv->hdcp_comp_mutex);
2120
2121	return 0;
2122}
2123
2124static void i915_hdcp_component_unbind(struct device *i915_kdev,
2125				       struct device *mei_kdev, void *data)
2126{
2127	struct drm_i915_private *dev_priv = kdev_to_i915(i915_kdev);
2128
2129	drm_dbg(&dev_priv->drm, "I915 HDCP comp unbind\n");
2130	mutex_lock(&dev_priv->hdcp_comp_mutex);
2131	dev_priv->hdcp_master = NULL;
2132	mutex_unlock(&dev_priv->hdcp_comp_mutex);
2133}
2134
2135static const struct component_ops i915_hdcp_component_ops = {
2136	.bind   = i915_hdcp_component_bind,
2137	.unbind = i915_hdcp_component_unbind,
2138};
2139
2140static enum mei_fw_ddi intel_get_mei_fw_ddi_index(enum port port)
2141{
2142	switch (port) {
2143	case PORT_A:
2144		return MEI_DDI_A;
2145	case PORT_B ... PORT_F:
2146		return (enum mei_fw_ddi)port;
2147	default:
2148		return MEI_DDI_INVALID_PORT;
2149	}
2150}
2151
2152static enum mei_fw_tc intel_get_mei_fw_tc(enum transcoder cpu_transcoder)
2153{
2154	switch (cpu_transcoder) {
2155	case TRANSCODER_A ... TRANSCODER_D:
2156		return (enum mei_fw_tc)(cpu_transcoder | 0x10);
2157	default: /* eDP, DSI TRANSCODERS are non HDCP capable */
2158		return MEI_INVALID_TRANSCODER;
2159	}
2160}
2161
2162static int initialize_hdcp_port_data(struct intel_connector *connector,
2163				     struct intel_digital_port *dig_port,
2164				     const struct intel_hdcp_shim *shim)
2165{
2166	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
2167	struct hdcp_port_data *data = &dig_port->hdcp_port_data;
2168	struct intel_hdcp *hdcp = &connector->hdcp;
2169	enum port port = dig_port->base.port;
2170
2171	if (DISPLAY_VER(dev_priv) < 12)
2172		data->fw_ddi = intel_get_mei_fw_ddi_index(port);
2173	else
2174		/*
2175		 * As per ME FW API expectation, for GEN 12+, fw_ddi is filled
2176		 * with zero(INVALID PORT index).
2177		 */
2178		data->fw_ddi = MEI_DDI_INVALID_PORT;
2179
2180	/*
2181	 * As associated transcoder is set and modified at modeset, here fw_tc
2182	 * is initialized to zero (invalid transcoder index). This will be
2183	 * retained for <Gen12 forever.
2184	 */
2185	data->fw_tc = MEI_INVALID_TRANSCODER;
2186
2187	data->port_type = (u8)HDCP_PORT_TYPE_INTEGRATED;
2188	data->protocol = (u8)shim->protocol;
2189
2190	if (!data->streams)
2191		data->streams = kcalloc(INTEL_NUM_PIPES(dev_priv),
2192					sizeof(struct hdcp2_streamid_type),
2193					GFP_KERNEL);
2194	if (!data->streams) {
2195		drm_err(&dev_priv->drm, "Out of Memory\n");
2196		return -ENOMEM;
2197	}
2198	/* For SST */
2199	data->streams[0].stream_id = 0;
2200	data->streams[0].stream_type = hdcp->content_type;
2201
2202	return 0;
2203}
2204
2205static bool is_hdcp2_supported(struct drm_i915_private *dev_priv)
2206{
 
 
 
 
 
2207	if (!IS_ENABLED(CONFIG_INTEL_MEI_HDCP))
2208		return false;
2209
2210	return (DISPLAY_VER(dev_priv) >= 10 ||
2211		IS_KABYLAKE(dev_priv) ||
2212		IS_COFFEELAKE(dev_priv) ||
2213		IS_COMETLAKE(dev_priv));
2214}
2215
2216void intel_hdcp_component_init(struct drm_i915_private *dev_priv)
2217{
2218	int ret;
2219
2220	if (!is_hdcp2_supported(dev_priv))
2221		return;
2222
2223	mutex_lock(&dev_priv->hdcp_comp_mutex);
2224	drm_WARN_ON(&dev_priv->drm, dev_priv->hdcp_comp_added);
 
 
 
 
 
 
 
 
2225
2226	dev_priv->hdcp_comp_added = true;
2227	mutex_unlock(&dev_priv->hdcp_comp_mutex);
2228	ret = component_add_typed(dev_priv->drm.dev, &i915_hdcp_component_ops,
2229				  I915_COMPONENT_HDCP);
2230	if (ret < 0) {
2231		drm_dbg_kms(&dev_priv->drm, "Failed at component add(%d)\n",
2232			    ret);
2233		mutex_lock(&dev_priv->hdcp_comp_mutex);
2234		dev_priv->hdcp_comp_added = false;
2235		mutex_unlock(&dev_priv->hdcp_comp_mutex);
2236		return;
2237	}
2238}
2239
2240static void intel_hdcp2_init(struct intel_connector *connector,
2241			     struct intel_digital_port *dig_port,
2242			     const struct intel_hdcp_shim *shim)
2243{
2244	struct drm_i915_private *i915 = to_i915(connector->base.dev);
2245	struct intel_hdcp *hdcp = &connector->hdcp;
2246	int ret;
2247
2248	ret = initialize_hdcp_port_data(connector, dig_port, shim);
2249	if (ret) {
2250		drm_dbg_kms(&i915->drm, "Mei hdcp data init failed\n");
2251		return;
2252	}
2253
2254	hdcp->hdcp2_supported = true;
2255}
2256
2257int intel_hdcp_init(struct intel_connector *connector,
2258		    struct intel_digital_port *dig_port,
2259		    const struct intel_hdcp_shim *shim)
2260{
2261	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
2262	struct intel_hdcp *hdcp = &connector->hdcp;
2263	int ret;
2264
2265	if (!shim)
2266		return -EINVAL;
2267
2268	if (is_hdcp2_supported(dev_priv))
2269		intel_hdcp2_init(connector, dig_port, shim);
2270
2271	ret =
2272	drm_connector_attach_content_protection_property(&connector->base,
2273							 hdcp->hdcp2_supported);
2274	if (ret) {
2275		hdcp->hdcp2_supported = false;
2276		kfree(dig_port->hdcp_port_data.streams);
2277		return ret;
2278	}
2279
2280	hdcp->shim = shim;
2281	mutex_init(&hdcp->mutex);
2282	INIT_DELAYED_WORK(&hdcp->check_work, intel_hdcp_check_work);
2283	INIT_WORK(&hdcp->prop_work, intel_hdcp_prop_work);
2284	init_waitqueue_head(&hdcp->cp_irq_queue);
2285
2286	return 0;
2287}
2288
2289int intel_hdcp_enable(struct intel_connector *connector,
2290		      const struct intel_crtc_state *pipe_config, u8 content_type)
 
 
2291{
2292	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
 
 
 
2293	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
2294	struct intel_hdcp *hdcp = &connector->hdcp;
2295	unsigned long check_link_interval = DRM_HDCP_CHECK_PERIOD_MS;
2296	int ret = -EINVAL;
2297
2298	if (!hdcp->shim)
2299		return -ENOENT;
2300
2301	if (!connector->encoder) {
2302		drm_err(&dev_priv->drm, "[%s:%d] encoder is not initialized\n",
2303			connector->base.name, connector->base.base.id);
2304		return -ENODEV;
2305	}
2306
2307	mutex_lock(&hdcp->mutex);
2308	mutex_lock(&dig_port->hdcp_mutex);
2309	drm_WARN_ON(&dev_priv->drm,
2310		    hdcp->value == DRM_MODE_CONTENT_PROTECTION_ENABLED);
2311	hdcp->content_type = content_type;
2312
2313	if (intel_crtc_has_type(pipe_config, INTEL_OUTPUT_DP_MST)) {
2314		hdcp->cpu_transcoder = pipe_config->mst_master_transcoder;
2315		hdcp->stream_transcoder = pipe_config->cpu_transcoder;
2316	} else {
2317		hdcp->cpu_transcoder = pipe_config->cpu_transcoder;
2318		hdcp->stream_transcoder = INVALID_TRANSCODER;
2319	}
2320
2321	if (DISPLAY_VER(dev_priv) >= 12)
2322		dig_port->hdcp_port_data.fw_tc = intel_get_mei_fw_tc(hdcp->cpu_transcoder);
 
2323
2324	/*
2325	 * Considering that HDCP2.2 is more secure than HDCP1.4, If the setup
2326	 * is capable of HDCP2.2, it is preferred to use HDCP2.2.
2327	 */
2328	if (intel_hdcp2_capable(connector)) {
2329		ret = _intel_hdcp2_enable(connector);
2330		if (!ret)
2331			check_link_interval = DRM_HDCP2_CHECK_PERIOD_MS;
 
2332	}
2333
2334	/*
2335	 * When HDCP2.2 fails and Content Type is not Type1, HDCP1.4 will
2336	 * be attempted.
2337	 */
2338	if (ret && intel_hdcp_capable(connector) &&
2339	    hdcp->content_type != DRM_MODE_HDCP_CONTENT_TYPE1) {
2340		ret = _intel_hdcp_enable(connector);
2341	}
2342
2343	if (!ret) {
2344		schedule_delayed_work(&hdcp->check_work, check_link_interval);
 
2345		intel_hdcp_update_value(connector,
2346					DRM_MODE_CONTENT_PROTECTION_ENABLED,
2347					true);
2348	}
2349
2350	mutex_unlock(&dig_port->hdcp_mutex);
2351	mutex_unlock(&hdcp->mutex);
2352	return ret;
2353}
2354
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
2355int intel_hdcp_disable(struct intel_connector *connector)
2356{
2357	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
2358	struct intel_hdcp *hdcp = &connector->hdcp;
2359	int ret = 0;
2360
2361	if (!hdcp->shim)
2362		return -ENOENT;
2363
2364	mutex_lock(&hdcp->mutex);
2365	mutex_lock(&dig_port->hdcp_mutex);
2366
2367	if (hdcp->value == DRM_MODE_CONTENT_PROTECTION_UNDESIRED)
2368		goto out;
2369
2370	intel_hdcp_update_value(connector,
2371				DRM_MODE_CONTENT_PROTECTION_UNDESIRED, false);
2372	if (hdcp->hdcp2_encrypted)
2373		ret = _intel_hdcp2_disable(connector, false);
2374	else if (hdcp->hdcp_encrypted)
2375		ret = _intel_hdcp_disable(connector);
2376
2377out:
2378	mutex_unlock(&dig_port->hdcp_mutex);
2379	mutex_unlock(&hdcp->mutex);
2380	cancel_delayed_work_sync(&hdcp->check_work);
2381	return ret;
2382}
2383
2384void intel_hdcp_update_pipe(struct intel_atomic_state *state,
2385			    struct intel_encoder *encoder,
2386			    const struct intel_crtc_state *crtc_state,
2387			    const struct drm_connector_state *conn_state)
2388{
2389	struct intel_connector *connector =
2390				to_intel_connector(conn_state->connector);
2391	struct intel_hdcp *hdcp = &connector->hdcp;
2392	bool content_protection_type_changed, desired_and_not_enabled = false;
 
2393
2394	if (!connector->hdcp.shim)
2395		return;
2396
2397	content_protection_type_changed =
2398		(conn_state->hdcp_content_type != hdcp->content_type &&
2399		 conn_state->content_protection !=
2400		 DRM_MODE_CONTENT_PROTECTION_UNDESIRED);
2401
2402	/*
2403	 * During the HDCP encryption session if Type change is requested,
2404	 * disable the HDCP and reenable it with new TYPE value.
2405	 */
2406	if (conn_state->content_protection ==
2407	    DRM_MODE_CONTENT_PROTECTION_UNDESIRED ||
2408	    content_protection_type_changed)
2409		intel_hdcp_disable(connector);
2410
2411	/*
2412	 * Mark the hdcp state as DESIRED after the hdcp disable of type
2413	 * change procedure.
2414	 */
2415	if (content_protection_type_changed) {
2416		mutex_lock(&hdcp->mutex);
2417		hdcp->value = DRM_MODE_CONTENT_PROTECTION_DESIRED;
2418		drm_connector_get(&connector->base);
2419		schedule_work(&hdcp->prop_work);
 
2420		mutex_unlock(&hdcp->mutex);
2421	}
2422
2423	if (conn_state->content_protection ==
2424	    DRM_MODE_CONTENT_PROTECTION_DESIRED) {
2425		mutex_lock(&hdcp->mutex);
2426		/* Avoid enabling hdcp, if it already ENABLED */
2427		desired_and_not_enabled =
2428			hdcp->value != DRM_MODE_CONTENT_PROTECTION_ENABLED;
2429		mutex_unlock(&hdcp->mutex);
2430		/*
2431		 * If HDCP already ENABLED and CP property is DESIRED, schedule
2432		 * prop_work to update correct CP property to user space.
2433		 */
2434		if (!desired_and_not_enabled && !content_protection_type_changed) {
2435			drm_connector_get(&connector->base);
2436			schedule_work(&hdcp->prop_work);
 
 
2437		}
2438	}
2439
2440	if (desired_and_not_enabled || content_protection_type_changed)
2441		intel_hdcp_enable(connector,
2442				  crtc_state,
2443				  (u8)conn_state->hdcp_content_type);
2444}
2445
2446void intel_hdcp_component_fini(struct drm_i915_private *dev_priv)
2447{
2448	mutex_lock(&dev_priv->hdcp_comp_mutex);
2449	if (!dev_priv->hdcp_comp_added) {
2450		mutex_unlock(&dev_priv->hdcp_comp_mutex);
2451		return;
2452	}
2453
2454	dev_priv->hdcp_comp_added = false;
2455	mutex_unlock(&dev_priv->hdcp_comp_mutex);
2456
2457	component_del(dev_priv->drm.dev, &i915_hdcp_component_ops);
 
 
 
2458}
2459
2460void intel_hdcp_cleanup(struct intel_connector *connector)
2461{
2462	struct intel_hdcp *hdcp = &connector->hdcp;
2463
2464	if (!hdcp->shim)
2465		return;
2466
2467	/*
2468	 * If the connector is registered, it's possible userspace could kick
2469	 * off another HDCP enable, which would re-spawn the workers.
2470	 */
2471	drm_WARN_ON(connector->base.dev,
2472		connector->base.registration_state == DRM_CONNECTOR_REGISTERED);
2473
2474	/*
2475	 * Now that the connector is not registered, check_work won't be run,
2476	 * but cancel any outstanding instances of it
2477	 */
2478	cancel_delayed_work_sync(&hdcp->check_work);
2479
2480	/*
2481	 * We don't cancel prop_work in the same way as check_work since it
2482	 * requires connection_mutex which could be held while calling this
2483	 * function. Instead, we rely on the connector references grabbed before
2484	 * scheduling prop_work to ensure the connector is alive when prop_work
2485	 * is run. So if we're in the destroy path (which is where this
2486	 * function should be called), we're "guaranteed" that prop_work is not
2487	 * active (tl;dr This Should Never Happen).
2488	 */
2489	drm_WARN_ON(connector->base.dev, work_pending(&hdcp->prop_work));
2490
2491	mutex_lock(&hdcp->mutex);
2492	hdcp->shim = NULL;
2493	mutex_unlock(&hdcp->mutex);
2494}
2495
2496void intel_hdcp_atomic_check(struct drm_connector *connector,
2497			     struct drm_connector_state *old_state,
2498			     struct drm_connector_state *new_state)
2499{
2500	u64 old_cp = old_state->content_protection;
2501	u64 new_cp = new_state->content_protection;
2502	struct drm_crtc_state *crtc_state;
2503
2504	if (!new_state->crtc) {
2505		/*
2506		 * If the connector is being disabled with CP enabled, mark it
2507		 * desired so it's re-enabled when the connector is brought back
2508		 */
2509		if (old_cp == DRM_MODE_CONTENT_PROTECTION_ENABLED)
2510			new_state->content_protection =
2511				DRM_MODE_CONTENT_PROTECTION_DESIRED;
2512		return;
2513	}
2514
2515	crtc_state = drm_atomic_get_new_crtc_state(new_state->state,
2516						   new_state->crtc);
2517	/*
2518	 * Fix the HDCP uapi content protection state in case of modeset.
2519	 * FIXME: As per HDCP content protection property uapi doc, an uevent()
2520	 * need to be sent if there is transition from ENABLED->DESIRED.
2521	 */
2522	if (drm_atomic_crtc_needs_modeset(crtc_state) &&
2523	    (old_cp == DRM_MODE_CONTENT_PROTECTION_ENABLED &&
2524	    new_cp != DRM_MODE_CONTENT_PROTECTION_UNDESIRED))
2525		new_state->content_protection =
2526			DRM_MODE_CONTENT_PROTECTION_DESIRED;
2527
2528	/*
2529	 * Nothing to do if the state didn't change, or HDCP was activated since
2530	 * the last commit. And also no change in hdcp content type.
2531	 */
2532	if (old_cp == new_cp ||
2533	    (old_cp == DRM_MODE_CONTENT_PROTECTION_DESIRED &&
2534	     new_cp == DRM_MODE_CONTENT_PROTECTION_ENABLED)) {
2535		if (old_state->hdcp_content_type ==
2536				new_state->hdcp_content_type)
2537			return;
2538	}
2539
2540	crtc_state->mode_changed = true;
2541}
2542
2543/* Handles the CP_IRQ raised from the DP HDCP sink */
2544void intel_hdcp_handle_cp_irq(struct intel_connector *connector)
2545{
2546	struct intel_hdcp *hdcp = &connector->hdcp;
 
 
2547
2548	if (!hdcp->shim)
2549		return;
2550
2551	atomic_inc(&connector->hdcp.cp_irq_count);
2552	wake_up_all(&connector->hdcp.cp_irq_queue);
2553
2554	schedule_delayed_work(&hdcp->check_work, 0);
2555}
v6.13.7
   1/* SPDX-License-Identifier: MIT */
   2/*
   3 * Copyright (C) 2017 Google, Inc.
   4 * Copyright _ 2017-2019, Intel Corporation.
   5 *
   6 * Authors:
   7 * Sean Paul <seanpaul@chromium.org>
   8 * Ramalingam C <ramalingam.c@intel.com>
   9 */
  10
  11#include <linux/component.h>
  12#include <linux/i2c.h>
  13#include <linux/random.h>
  14
  15#include <drm/display/drm_hdcp_helper.h>
  16#include <drm/intel/i915_component.h>
  17
  18#include "i915_drv.h"
  19#include "i915_reg.h"
  20#include "intel_connector.h"
  21#include "intel_de.h"
  22#include "intel_display_power.h"
  23#include "intel_display_power_well.h"
  24#include "intel_display_types.h"
  25#include "intel_hdcp.h"
  26#include "intel_hdcp_gsc.h"
  27#include "intel_hdcp_regs.h"
  28#include "intel_hdcp_shim.h"
  29#include "intel_pcode.h"
  30
  31#define KEY_LOAD_TRIES	5
  32#define HDCP2_LC_RETRY_CNT			3
  33
  34/* WA: 16022217614 */
  35static void
  36intel_hdcp_disable_hdcp_line_rekeying(struct intel_encoder *encoder,
  37				      struct intel_hdcp *hdcp)
  38{
  39	struct intel_display *display = to_intel_display(encoder);
  40
  41	/* Here we assume HDMI is in TMDS mode of operation */
  42	if (encoder->type != INTEL_OUTPUT_HDMI)
  43		return;
  44
  45	if (DISPLAY_VER(display) >= 14) {
  46		if (IS_DISPLAY_VERx100_STEP(display, 1400, STEP_D0, STEP_FOREVER))
  47			intel_de_rmw(display, MTL_CHICKEN_TRANS(hdcp->cpu_transcoder),
  48				     0, HDCP_LINE_REKEY_DISABLE);
  49		else if (IS_DISPLAY_VERx100_STEP(display, 1401, STEP_B0, STEP_FOREVER) ||
  50			 IS_DISPLAY_VERx100_STEP(display, 2000, STEP_B0, STEP_FOREVER))
  51			intel_de_rmw(display,
  52				     TRANS_DDI_FUNC_CTL(display, hdcp->cpu_transcoder),
  53				     0, TRANS_DDI_HDCP_LINE_REKEY_DISABLE);
  54	}
  55}
  56
  57static int intel_conn_to_vcpi(struct intel_atomic_state *state,
  58			      struct intel_connector *connector)
  59{
  60	struct drm_dp_mst_topology_mgr *mgr;
  61	struct drm_dp_mst_atomic_payload *payload;
  62	struct drm_dp_mst_topology_state *mst_state;
  63	int vcpi = 0;
  64
  65	/* For HDMI this is forced to be 0x0. For DP SST also this is 0x0. */
  66	if (!connector->port)
  67		return 0;
  68	mgr = connector->port->mgr;
  69
  70	drm_modeset_lock(&mgr->base.lock, state->base.acquire_ctx);
  71	mst_state = to_drm_dp_mst_topology_state(mgr->base.state);
  72	payload = drm_atomic_get_mst_payload_state(mst_state, connector->port);
  73	if (drm_WARN_ON(mgr->dev, !payload))
  74		goto out;
  75
  76	vcpi = payload->vcpi;
  77	if (drm_WARN_ON(mgr->dev, vcpi < 0)) {
  78		vcpi = 0;
  79		goto out;
  80	}
  81out:
  82	return vcpi;
  83}
  84
  85/*
  86 * intel_hdcp_required_content_stream selects the most highest common possible HDCP
  87 * content_type for all streams in DP MST topology because security f/w doesn't
  88 * have any provision to mark content_type for each stream separately, it marks
  89 * all available streams with the content_type proivided at the time of port
  90 * authentication. This may prohibit the userspace to use type1 content on
  91 * HDCP 2.2 capable sink because of other sink are not capable of HDCP 2.2 in
  92 * DP MST topology. Though it is not compulsory, security fw should change its
  93 * policy to mark different content_types for different streams.
  94 */
  95static int
  96intel_hdcp_required_content_stream(struct intel_atomic_state *state,
  97				   struct intel_digital_port *dig_port)
  98{
  99	struct intel_display *display = to_intel_display(state);
 100	struct drm_connector_list_iter conn_iter;
 101	struct intel_digital_port *conn_dig_port;
 102	struct intel_connector *connector;
 
 103	struct hdcp_port_data *data = &dig_port->hdcp_port_data;
 104	bool enforce_type0 = false;
 105	int k;
 106
 
 
 107	if (dig_port->hdcp_auth_status)
 108		return 0;
 109
 110	data->k = 0;
 111
 112	if (!dig_port->hdcp_mst_type1_capable)
 113		enforce_type0 = true;
 114
 115	drm_connector_list_iter_begin(display->drm, &conn_iter);
 116	for_each_intel_connector_iter(connector, &conn_iter) {
 117		if (connector->base.status == connector_status_disconnected)
 118			continue;
 119
 120		if (!intel_encoder_is_mst(intel_attached_encoder(connector)))
 121			continue;
 122
 123		conn_dig_port = intel_attached_dig_port(connector);
 124		if (conn_dig_port != dig_port)
 125			continue;
 126
 127		data->streams[data->k].stream_id =
 128			intel_conn_to_vcpi(state, connector);
 
 
 129		data->k++;
 130
 131		/* if there is only one active stream */
 132		if (dig_port->dp.active_mst_links <= 1)
 133			break;
 134	}
 135	drm_connector_list_iter_end(&conn_iter);
 136
 137	if (drm_WARN_ON(display->drm, data->k > INTEL_NUM_PIPES(display) || data->k == 0))
 138		return -EINVAL;
 139
 140	/*
 141	 * Apply common protection level across all streams in DP MST Topology.
 142	 * Use highest supported content type for all streams in DP MST Topology.
 143	 */
 144	for (k = 0; k < data->k; k++)
 145		data->streams[k].stream_type =
 146			enforce_type0 ? DRM_MODE_HDCP_CONTENT_TYPE0 : DRM_MODE_HDCP_CONTENT_TYPE1;
 147
 148	return 0;
 149}
 150
 151static int intel_hdcp_prepare_streams(struct intel_atomic_state *state,
 152				      struct intel_connector *connector)
 153{
 154	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
 155	struct hdcp_port_data *data = &dig_port->hdcp_port_data;
 156	struct intel_hdcp *hdcp = &connector->hdcp;
 157
 158	if (intel_encoder_is_mst(intel_attached_encoder(connector)))
 159		return intel_hdcp_required_content_stream(state, dig_port);
 160
 161	data->k = 1;
 162	data->streams[0].stream_id = 0;
 163	data->streams[0].stream_type = hdcp->content_type;
 164
 165	return 0;
 166}
 167
 168static
 169bool intel_hdcp_is_ksv_valid(u8 *ksv)
 170{
 171	int i, ones = 0;
 172	/* KSV has 20 1's and 20 0's */
 173	for (i = 0; i < DRM_HDCP_KSV_LEN; i++)
 174		ones += hweight8(ksv[i]);
 175	if (ones != 20)
 176		return false;
 177
 178	return true;
 179}
 180
 181static
 182int intel_hdcp_read_valid_bksv(struct intel_digital_port *dig_port,
 183			       const struct intel_hdcp_shim *shim, u8 *bksv)
 184{
 185	struct intel_display *display = to_intel_display(dig_port);
 186	int ret, i, tries = 2;
 187
 188	/* HDCP spec states that we must retry the bksv if it is invalid */
 189	for (i = 0; i < tries; i++) {
 190		ret = shim->read_bksv(dig_port, bksv);
 191		if (ret)
 192			return ret;
 193		if (intel_hdcp_is_ksv_valid(bksv))
 194			break;
 195	}
 196	if (i == tries) {
 197		drm_dbg_kms(display->drm, "Bksv is invalid\n");
 198		return -ENODEV;
 199	}
 200
 201	return 0;
 202}
 203
 204/* Is HDCP1.4 capable on Platform and Sink */
 205bool intel_hdcp_get_capability(struct intel_connector *connector)
 206{
 207	struct intel_digital_port *dig_port;
 208	const struct intel_hdcp_shim *shim = connector->hdcp.shim;
 209	bool capable = false;
 210	u8 bksv[5];
 211
 212	if (!intel_attached_encoder(connector))
 213		return capable;
 214
 215	dig_port = intel_attached_dig_port(connector);
 216
 217	if (!shim)
 218		return capable;
 219
 220	if (shim->hdcp_get_capability) {
 221		shim->hdcp_get_capability(dig_port, &capable);
 222	} else {
 223		if (!intel_hdcp_read_valid_bksv(dig_port, shim, bksv))
 224			capable = true;
 225	}
 226
 227	return capable;
 228}
 229
 230/*
 231 * Check if the source has all the building blocks ready to make
 232 * HDCP 2.2 work
 233 */
 234static bool intel_hdcp2_prerequisite(struct intel_connector *connector)
 235{
 236	struct intel_display *display = to_intel_display(connector);
 
 237	struct intel_hdcp *hdcp = &connector->hdcp;
 
 238
 239	/* I915 support for HDCP2.2 */
 240	if (!hdcp->hdcp2_supported)
 241		return false;
 242
 243	/* If MTL+ make sure gsc is loaded and proxy is setup */
 244	if (intel_hdcp_gsc_cs_required(display)) {
 245		if (!intel_hdcp_gsc_check_status(display))
 246			return false;
 247	}
 248
 249	/* MEI/GSC interface is solid depending on which is used */
 250	mutex_lock(&display->hdcp.hdcp_mutex);
 251	if (!display->hdcp.comp_added || !display->hdcp.arbiter) {
 252		mutex_unlock(&display->hdcp.hdcp_mutex);
 253		return false;
 254	}
 255	mutex_unlock(&display->hdcp.hdcp_mutex);
 256
 257	return true;
 258}
 259
 260/* Is HDCP2.2 capable on Platform and Sink */
 261bool intel_hdcp2_get_capability(struct intel_connector *connector)
 262{
 263	struct intel_hdcp *hdcp = &connector->hdcp;
 264	bool capable = false;
 265
 266	if (!intel_hdcp2_prerequisite(connector))
 267		return false;
 268
 269	/* Sink's capability for HDCP2.2 */
 270	hdcp->shim->hdcp_2_2_get_capability(connector, &capable);
 271
 272	return capable;
 273}
 274
 275void intel_hdcp_get_remote_capability(struct intel_connector *connector,
 276				      bool *hdcp_capable,
 277				      bool *hdcp2_capable)
 278{
 279	struct intel_hdcp *hdcp = &connector->hdcp;
 280
 281	if (!hdcp->shim->get_remote_hdcp_capability)
 282		return;
 283
 284	hdcp->shim->get_remote_hdcp_capability(connector, hdcp_capable,
 285					       hdcp2_capable);
 286
 287	if (!intel_hdcp2_prerequisite(connector))
 288		*hdcp2_capable = false;
 289}
 290
 291static bool intel_hdcp_in_use(struct intel_display *display,
 292			      enum transcoder cpu_transcoder, enum port port)
 293{
 294	return intel_de_read(display,
 295			     HDCP_STATUS(display, cpu_transcoder, port)) &
 296		HDCP_STATUS_ENC;
 297}
 298
 299static bool intel_hdcp2_in_use(struct intel_display *display,
 300			       enum transcoder cpu_transcoder, enum port port)
 301{
 302	return intel_de_read(display,
 303			     HDCP2_STATUS(display, cpu_transcoder, port)) &
 304		LINK_ENCRYPTION_STATUS;
 305}
 306
 307static int intel_hdcp_poll_ksv_fifo(struct intel_digital_port *dig_port,
 308				    const struct intel_hdcp_shim *shim)
 309{
 310	int ret, read_ret;
 311	bool ksv_ready;
 312
 313	/* Poll for ksv list ready (spec says max time allowed is 5s) */
 314	ret = __wait_for(read_ret = shim->read_ksv_ready(dig_port,
 315							 &ksv_ready),
 316			 read_ret || ksv_ready, 5 * 1000 * 1000, 1000,
 317			 100 * 1000);
 318	if (ret)
 319		return ret;
 320	if (read_ret)
 321		return read_ret;
 322	if (!ksv_ready)
 323		return -ETIMEDOUT;
 324
 325	return 0;
 326}
 327
 328static bool hdcp_key_loadable(struct intel_display *display)
 329{
 330	struct drm_i915_private *i915 = to_i915(display->drm);
 331	enum i915_power_well_id id;
 332	intel_wakeref_t wakeref;
 333	bool enabled = false;
 334
 335	/*
 336	 * On HSW and BDW, Display HW loads the Key as soon as Display resumes.
 337	 * On all BXT+, SW can load the keys only when the PW#1 is turned on.
 338	 */
 339	if (IS_HASWELL(i915) || IS_BROADWELL(i915))
 340		id = HSW_DISP_PW_GLOBAL;
 341	else
 342		id = SKL_DISP_PW_1;
 343
 344	/* PG1 (power well #1) needs to be enabled */
 345	with_intel_runtime_pm(&i915->runtime_pm, wakeref)
 346		enabled = intel_display_power_well_is_enabled(i915, id);
 347
 348	/*
 349	 * Another req for hdcp key loadability is enabled state of pll for
 350	 * cdclk. Without active crtc we wont land here. So we are assuming that
 351	 * cdclk is already on.
 352	 */
 353
 354	return enabled;
 355}
 356
 357static void intel_hdcp_clear_keys(struct intel_display *display)
 358{
 359	intel_de_write(display, HDCP_KEY_CONF, HDCP_CLEAR_KEYS_TRIGGER);
 360	intel_de_write(display, HDCP_KEY_STATUS,
 361		       HDCP_KEY_LOAD_DONE | HDCP_KEY_LOAD_STATUS | HDCP_FUSE_IN_PROGRESS | HDCP_FUSE_ERROR | HDCP_FUSE_DONE);
 362}
 363
 364static int intel_hdcp_load_keys(struct intel_display *display)
 365{
 366	struct drm_i915_private *i915 = to_i915(display->drm);
 367	int ret;
 368	u32 val;
 369
 370	val = intel_de_read(display, HDCP_KEY_STATUS);
 371	if ((val & HDCP_KEY_LOAD_DONE) && (val & HDCP_KEY_LOAD_STATUS))
 372		return 0;
 373
 374	/*
 375	 * On HSW and BDW HW loads the HDCP1.4 Key when Display comes
 376	 * out of reset. So if Key is not already loaded, its an error state.
 377	 */
 378	if (IS_HASWELL(i915) || IS_BROADWELL(i915))
 379		if (!(intel_de_read(display, HDCP_KEY_STATUS) & HDCP_KEY_LOAD_DONE))
 380			return -ENXIO;
 381
 382	/*
 383	 * Initiate loading the HDCP key from fuses.
 384	 *
 385	 * BXT+ platforms, HDCP key needs to be loaded by SW. Only display
 386	 * version 9 platforms (minus BXT) differ in the key load trigger
 387	 * process from other platforms. These platforms use the GT Driver
 388	 * Mailbox interface.
 389	 */
 390	if (DISPLAY_VER(display) == 9 && !IS_BROXTON(i915)) {
 391		ret = snb_pcode_write(&i915->uncore, SKL_PCODE_LOAD_HDCP_KEYS, 1);
 
 392		if (ret) {
 393			drm_err(display->drm,
 394				"Failed to initiate HDCP key load (%d)\n",
 395				ret);
 396			return ret;
 397		}
 398	} else {
 399		intel_de_write(display, HDCP_KEY_CONF, HDCP_KEY_LOAD_TRIGGER);
 400	}
 401
 402	/* Wait for the keys to load (500us) */
 403	ret = intel_de_wait_custom(display, HDCP_KEY_STATUS,
 404				   HDCP_KEY_LOAD_DONE, HDCP_KEY_LOAD_DONE,
 405				   10, 1, &val);
 406	if (ret)
 407		return ret;
 408	else if (!(val & HDCP_KEY_LOAD_STATUS))
 409		return -ENXIO;
 410
 411	/* Send Aksv over to PCH display for use in authentication */
 412	intel_de_write(display, HDCP_KEY_CONF, HDCP_AKSV_SEND_TRIGGER);
 413
 414	return 0;
 415}
 416
 417/* Returns updated SHA-1 index */
 418static int intel_write_sha_text(struct intel_display *display, u32 sha_text)
 419{
 420	intel_de_write(display, HDCP_SHA_TEXT, sha_text);
 421	if (intel_de_wait_for_set(display, HDCP_REP_CTL, HDCP_SHA1_READY, 1)) {
 422		drm_err(display->drm, "Timed out waiting for SHA1 ready\n");
 423		return -ETIMEDOUT;
 424	}
 425	return 0;
 426}
 427
 428static
 429u32 intel_hdcp_get_repeater_ctl(struct intel_display *display,
 430				enum transcoder cpu_transcoder, enum port port)
 431{
 432	if (DISPLAY_VER(display) >= 12) {
 433		switch (cpu_transcoder) {
 434		case TRANSCODER_A:
 435			return HDCP_TRANSA_REP_PRESENT |
 436			       HDCP_TRANSA_SHA1_M0;
 437		case TRANSCODER_B:
 438			return HDCP_TRANSB_REP_PRESENT |
 439			       HDCP_TRANSB_SHA1_M0;
 440		case TRANSCODER_C:
 441			return HDCP_TRANSC_REP_PRESENT |
 442			       HDCP_TRANSC_SHA1_M0;
 443		case TRANSCODER_D:
 444			return HDCP_TRANSD_REP_PRESENT |
 445			       HDCP_TRANSD_SHA1_M0;
 446		default:
 447			drm_err(display->drm, "Unknown transcoder %d\n",
 448				cpu_transcoder);
 449			return 0;
 450		}
 451	}
 452
 453	switch (port) {
 454	case PORT_A:
 455		return HDCP_DDIA_REP_PRESENT | HDCP_DDIA_SHA1_M0;
 456	case PORT_B:
 457		return HDCP_DDIB_REP_PRESENT | HDCP_DDIB_SHA1_M0;
 458	case PORT_C:
 459		return HDCP_DDIC_REP_PRESENT | HDCP_DDIC_SHA1_M0;
 460	case PORT_D:
 461		return HDCP_DDID_REP_PRESENT | HDCP_DDID_SHA1_M0;
 462	case PORT_E:
 463		return HDCP_DDIE_REP_PRESENT | HDCP_DDIE_SHA1_M0;
 464	default:
 465		drm_err(display->drm, "Unknown port %d\n", port);
 466		return 0;
 467	}
 468}
 469
/*
 * Validate the repeater's V' value (HDCP 1.4 Part 2).
 *
 * Reads the 5 V' parts from the sink and programs them into the hardware,
 * then streams the SHA-1 input (KSV list || BINFO/BSTATUS || M0, with M0
 * injected by hardware via HDCP_REP_CTL) through HDCP_SHA_TEXT, finishes
 * the hash and lets the hardware compare it against V'.
 *
 * Returns 0 when the hardware reports a V match, -ETIMEDOUT on hardware
 * timeouts, -ENXIO on SHA-1 mismatch, -EINVAL on bookkeeping errors, or
 * a negative error from the shim reads.
 */
static
int intel_hdcp_validate_v_prime(struct intel_connector *connector,
				const struct intel_hdcp_shim *shim,
				u8 *ksv_fifo, u8 num_downstream, u8 *bstatus)
{
	struct intel_display *display = to_intel_display(connector);
	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
	enum transcoder cpu_transcoder = connector->hdcp.cpu_transcoder;
	enum port port = dig_port->base.port;
	u32 vprime, sha_text, sha_leftovers, rep_ctl;
	int ret, i, j, sha_idx;

	/* Process V' values from the receiver */
	for (i = 0; i < DRM_HDCP_V_PRIME_NUM_PARTS; i++) {
		ret = shim->read_v_prime_part(dig_port, i, &vprime);
		if (ret)
			return ret;
		intel_de_write(display, HDCP_SHA_V_PRIME(i), vprime);
	}

	/*
	 * We need to write the concatenation of all device KSVs, BINFO (DP) ||
	 * BSTATUS (HDMI), and M0 (which is added via HDCP_REP_CTL). This byte
	 * stream is written via the HDCP_SHA_TEXT register in 32-bit
	 * increments. Every 64 bytes, we need to write HDCP_REP_CTL again. This
	 * index will keep track of our progress through the 64 bytes as well as
	 * helping us work the 40-bit KSVs through our 32-bit register.
	 *
	 * NOTE: data passed via HDCP_SHA_TEXT should be big-endian
	 */
	sha_idx = 0;
	sha_text = 0;
	sha_leftovers = 0;
	rep_ctl = intel_hdcp_get_repeater_ctl(display, cpu_transcoder, port);
	intel_de_write(display, HDCP_REP_CTL, rep_ctl | HDCP_SHA1_TEXT_32);
	for (i = 0; i < num_downstream; i++) {
		unsigned int sha_empty;
		/* Each KSV is 5 bytes; they don't pack evenly into u32 words. */
		u8 *ksv = &ksv_fifo[i * DRM_HDCP_KSV_LEN];

		/* Fill up the empty slots in sha_text and write it out */
		sha_empty = sizeof(sha_text) - sha_leftovers;
		for (j = 0; j < sha_empty; j++) {
			u8 off = ((sizeof(sha_text) - j - 1 - sha_leftovers) * 8);
			sha_text |= ksv[j] << off;
		}

		ret = intel_write_sha_text(display, sha_text);
		if (ret < 0)
			return ret;

		/* Programming guide writes this every 64 bytes */
		sha_idx += sizeof(sha_text);
		if (!(sha_idx % 64))
			intel_de_write(display, HDCP_REP_CTL,
				       rep_ctl | HDCP_SHA1_TEXT_32);

		/* Store the leftover bytes from the ksv in sha_text */
		sha_leftovers = DRM_HDCP_KSV_LEN - sha_empty;
		sha_text = 0;
		for (j = 0; j < sha_leftovers; j++)
			sha_text |= ksv[sha_empty + j] <<
					((sizeof(sha_text) - j - 1) * 8);

		/*
		 * If we still have room in sha_text for more data, continue.
		 * Otherwise, write it out immediately.
		 */
		if (sizeof(sha_text) > sha_leftovers)
			continue;

		ret = intel_write_sha_text(display, sha_text);
		if (ret < 0)
			return ret;
		sha_leftovers = 0;
		sha_text = 0;
		sha_idx += sizeof(sha_text);
	}

	/*
	 * We need to write BINFO/BSTATUS, and M0 now. Depending on how many
	 * bytes are leftover from the last ksv, we might be able to fit them
	 * all in sha_text (first 2 cases), or we might need to split them up
	 * into 2 writes (last 2 cases).
	 */
	if (sha_leftovers == 0) {
		/* Write 16 bits of text, 16 bits of M0 */
		intel_de_write(display, HDCP_REP_CTL,
			       rep_ctl | HDCP_SHA1_TEXT_16);
		ret = intel_write_sha_text(display,
					   bstatus[0] << 8 | bstatus[1]);
		if (ret < 0)
			return ret;
		sha_idx += sizeof(sha_text);

		/* Write 32 bits of M0 */
		intel_de_write(display, HDCP_REP_CTL,
			       rep_ctl | HDCP_SHA1_TEXT_0);
		ret = intel_write_sha_text(display, 0);
		if (ret < 0)
			return ret;
		sha_idx += sizeof(sha_text);

		/* Write 16 bits of M0 */
		intel_de_write(display, HDCP_REP_CTL,
			       rep_ctl | HDCP_SHA1_TEXT_16);
		ret = intel_write_sha_text(display, 0);
		if (ret < 0)
			return ret;
		sha_idx += sizeof(sha_text);

	} else if (sha_leftovers == 1) {
		/* Write 24 bits of text, 8 bits of M0 */
		intel_de_write(display, HDCP_REP_CTL,
			       rep_ctl | HDCP_SHA1_TEXT_24);
		sha_text |= bstatus[0] << 16 | bstatus[1] << 8;
		/* Only 24-bits of data, must be in the LSB */
		sha_text = (sha_text & 0xffffff00) >> 8;
		ret = intel_write_sha_text(display, sha_text);
		if (ret < 0)
			return ret;
		sha_idx += sizeof(sha_text);

		/* Write 32 bits of M0 */
		intel_de_write(display, HDCP_REP_CTL,
			       rep_ctl | HDCP_SHA1_TEXT_0);
		ret = intel_write_sha_text(display, 0);
		if (ret < 0)
			return ret;
		sha_idx += sizeof(sha_text);

		/* Write 24 bits of M0 */
		intel_de_write(display, HDCP_REP_CTL,
			       rep_ctl | HDCP_SHA1_TEXT_8);
		ret = intel_write_sha_text(display, 0);
		if (ret < 0)
			return ret;
		sha_idx += sizeof(sha_text);

	} else if (sha_leftovers == 2) {
		/* Write 32 bits of text */
		intel_de_write(display, HDCP_REP_CTL,
			       rep_ctl | HDCP_SHA1_TEXT_32);
		sha_text |= bstatus[0] << 8 | bstatus[1];
		ret = intel_write_sha_text(display, sha_text);
		if (ret < 0)
			return ret;
		sha_idx += sizeof(sha_text);

		/* Write 64 bits of M0 */
		intel_de_write(display, HDCP_REP_CTL,
			       rep_ctl | HDCP_SHA1_TEXT_0);
		for (i = 0; i < 2; i++) {
			ret = intel_write_sha_text(display, 0);
			if (ret < 0)
				return ret;
			sha_idx += sizeof(sha_text);
		}

		/*
		 * Terminate the SHA-1 stream by hand. For the other leftover
		 * cases this is appended by the hardware.
		 */
		intel_de_write(display, HDCP_REP_CTL,
			       rep_ctl | HDCP_SHA1_TEXT_32);
		sha_text = DRM_HDCP_SHA1_TERMINATOR << 24;
		ret = intel_write_sha_text(display, sha_text);
		if (ret < 0)
			return ret;
		sha_idx += sizeof(sha_text);
	} else if (sha_leftovers == 3) {
		/* Write 32 bits of text (filled from LSB) */
		intel_de_write(display, HDCP_REP_CTL,
			       rep_ctl | HDCP_SHA1_TEXT_32);
		sha_text |= bstatus[0];
		ret = intel_write_sha_text(display, sha_text);
		if (ret < 0)
			return ret;
		sha_idx += sizeof(sha_text);

		/* Write 8 bits of text (filled from LSB), 24 bits of M0 */
		intel_de_write(display, HDCP_REP_CTL,
			       rep_ctl | HDCP_SHA1_TEXT_8);
		ret = intel_write_sha_text(display, bstatus[1]);
		if (ret < 0)
			return ret;
		sha_idx += sizeof(sha_text);

		/* Write 32 bits of M0 */
		intel_de_write(display, HDCP_REP_CTL,
			       rep_ctl | HDCP_SHA1_TEXT_0);
		ret = intel_write_sha_text(display, 0);
		if (ret < 0)
			return ret;
		sha_idx += sizeof(sha_text);

		/* Write 8 bits of M0 */
		intel_de_write(display, HDCP_REP_CTL,
			       rep_ctl | HDCP_SHA1_TEXT_24);
		ret = intel_write_sha_text(display, 0);
		if (ret < 0)
			return ret;
		sha_idx += sizeof(sha_text);
	} else {
		drm_dbg_kms(display->drm, "Invalid number of leftovers %d\n",
			    sha_leftovers);
		return -EINVAL;
	}

	intel_de_write(display, HDCP_REP_CTL, rep_ctl | HDCP_SHA1_TEXT_32);
	/* Fill up to 64-4 bytes with zeros (leave the last write for length) */
	while ((sha_idx % 64) < (64 - sizeof(sha_text))) {
		ret = intel_write_sha_text(display, 0);
		if (ret < 0)
			return ret;
		sha_idx += sizeof(sha_text);
	}

	/*
	 * Last write gets the length of the concatenation in bits. That is:
	 *  - 5 bytes per device
	 *  - 10 bytes for BINFO/BSTATUS(2), M0(8)
	 */
	sha_text = (num_downstream * 5 + 10) * 8;
	ret = intel_write_sha_text(display, sha_text);
	if (ret < 0)
		return ret;

	/* Tell the HW we're done with the hash and wait for it to ACK */
	intel_de_write(display, HDCP_REP_CTL,
		       rep_ctl | HDCP_SHA1_COMPLETE_HASH);
	if (intel_de_wait_for_set(display, HDCP_REP_CTL,
				  HDCP_SHA1_COMPLETE, 1)) {
		drm_err(display->drm, "Timed out waiting for SHA1 complete\n");
		return -ETIMEDOUT;
	}
	if (!(intel_de_read(display, HDCP_REP_CTL) & HDCP_SHA1_V_MATCH)) {
		drm_dbg_kms(display->drm, "SHA-1 mismatch, HDCP failed\n");
		return -ENXIO;
	}

	return 0;
}
 712
/*
 * Implements Part 2 of the HDCP authorization procedure: repeater
 * (downstream) authentication. Waits for the sink's KSV FIFO to become
 * ready, sanity-checks the topology from BSTATUS, reads the KSV list,
 * rejects revoked KSVs, and validates V' (retrying up to 3 times as the
 * DP spec requires).
 *
 * Returns 0 on success or a negative error code; the allocated KSV FIFO
 * buffer is always freed before returning.
 */
static
int intel_hdcp_auth_downstream(struct intel_connector *connector)
{
	struct intel_display *display = to_intel_display(connector);
	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
	const struct intel_hdcp_shim *shim = connector->hdcp.shim;
	u8 bstatus[2], num_downstream, *ksv_fifo;
	int ret, i, tries = 3;

	ret = intel_hdcp_poll_ksv_fifo(dig_port, shim);
	if (ret) {
		drm_dbg_kms(display->drm,
			    "KSV list failed to become ready (%d)\n", ret);
		return ret;
	}

	ret = shim->read_bstatus(dig_port, bstatus);
	if (ret)
		return ret;

	if (DRM_HDCP_MAX_DEVICE_EXCEEDED(bstatus[0]) ||
	    DRM_HDCP_MAX_CASCADE_EXCEEDED(bstatus[1])) {
		drm_dbg_kms(display->drm, "Max Topology Limit Exceeded\n");
		return -EPERM;
	}

	/*
	 * When repeater reports 0 device count, HDCP1.4 spec allows disabling
	 * the HDCP encryption. That implies that repeater can't have its own
	 * display. As there is no consumption of encrypted content in the
	 * repeater with 0 downstream devices, we are failing the
	 * authentication.
	 */
	num_downstream = DRM_HDCP_NUM_DOWNSTREAM(bstatus[0]);
	if (num_downstream == 0) {
		drm_dbg_kms(display->drm,
			    "Repeater with zero downstream devices\n");
		return -EINVAL;
	}

	ksv_fifo = kcalloc(DRM_HDCP_KSV_LEN, num_downstream, GFP_KERNEL);
	if (!ksv_fifo) {
		drm_dbg_kms(display->drm, "Out of mem: ksv_fifo\n");
		return -ENOMEM;
	}

	ret = shim->read_ksv_fifo(dig_port, num_downstream, ksv_fifo);
	if (ret)
		goto err;

	if (drm_hdcp_check_ksvs_revoked(display->drm, ksv_fifo,
					num_downstream) > 0) {
		drm_err(display->drm, "Revoked Ksv(s) in ksv_fifo\n");
		ret = -EPERM;
		goto err;
	}

	/*
	 * When V prime mismatches, DP Spec mandates re-read of
	 * V prime atleast twice.
	 */
	for (i = 0; i < tries; i++) {
		ret = intel_hdcp_validate_v_prime(connector, shim,
						  ksv_fifo, num_downstream,
						  bstatus);
		if (!ret)
			break;
	}

	/* All attempts failed; ret holds the last validation error. */
	if (i == tries) {
		drm_dbg_kms(display->drm,
			    "V Prime validation failed.(%d)\n", ret);
		goto err;
	}

	drm_dbg_kms(display->drm, "HDCP is enabled (%d downstream devices)\n",
		    num_downstream);
	ret = 0;
err:
	kfree(ksv_fifo);
	return ret;
}
 796
/*
 * Implements Part 1 of the HDCP authorization procedure: first-part
 * authentication with the attached receiver. Generates An, exchanges
 * An/Aksv and Bksv with the sink through the shim, enables signalling
 * and encryption, and verifies R0/R0' match. If the sink is a repeater,
 * chains into intel_hdcp_auth_downstream() for Part 2.
 *
 * Returns 0 on success or a negative error code on any protocol,
 * revocation or timeout failure.
 */
static int intel_hdcp_auth(struct intel_connector *connector)
{
	struct intel_display *display = to_intel_display(connector);
	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
	struct intel_hdcp *hdcp = &connector->hdcp;
	const struct intel_hdcp_shim *shim = hdcp->shim;
	enum transcoder cpu_transcoder = connector->hdcp.cpu_transcoder;
	enum port port = dig_port->base.port;
	unsigned long r0_prime_gen_start;
	int ret, i, tries = 2;
	/* Unions let the same bytes be written to HW as u32 and to the shim as u8[]. */
	union {
		u32 reg[2];
		u8 shim[DRM_HDCP_AN_LEN];
	} an;
	union {
		u32 reg[2];
		u8 shim[DRM_HDCP_KSV_LEN];
	} bksv;
	union {
		u32 reg;
		u8 shim[DRM_HDCP_RI_LEN];
	} ri;
	bool repeater_present, hdcp_capable;

	/*
	 * Detects whether the display is HDCP capable. Although we check for
	 * valid Bksv below, the HDCP over DP spec requires that we check
	 * whether the display supports HDCP before we write An. For HDMI
	 * displays, this is not necessary.
	 */
	if (shim->hdcp_get_capability) {
		ret = shim->hdcp_get_capability(dig_port, &hdcp_capable);
		if (ret)
			return ret;
		if (!hdcp_capable) {
			drm_dbg_kms(display->drm,
				    "Panel is not HDCP capable\n");
			return -EINVAL;
		}
	}

	/* Initialize An with 2 random values and acquire it */
	for (i = 0; i < 2; i++)
		intel_de_write(display,
			       HDCP_ANINIT(display, cpu_transcoder, port),
			       get_random_u32());
	intel_de_write(display, HDCP_CONF(display, cpu_transcoder, port),
		       HDCP_CONF_CAPTURE_AN);

	/* Wait for An to be acquired */
	if (intel_de_wait_for_set(display,
				  HDCP_STATUS(display, cpu_transcoder, port),
				  HDCP_STATUS_AN_READY, 1)) {
		drm_err(display->drm, "Timed out waiting for An\n");
		return -ETIMEDOUT;
	}

	an.reg[0] = intel_de_read(display,
				  HDCP_ANLO(display, cpu_transcoder, port));
	an.reg[1] = intel_de_read(display,
				  HDCP_ANHI(display, cpu_transcoder, port));
	ret = shim->write_an_aksv(dig_port, an.shim);
	if (ret)
		return ret;

	/* R0' becomes available 100ms (spec) after Aksv is written; see below. */
	r0_prime_gen_start = jiffies;

	memset(&bksv, 0, sizeof(bksv));

	ret = intel_hdcp_read_valid_bksv(dig_port, shim, bksv.shim);
	if (ret < 0)
		return ret;

	if (drm_hdcp_check_ksvs_revoked(display->drm, bksv.shim, 1) > 0) {
		drm_err(display->drm, "BKSV is revoked\n");
		return -EPERM;
	}

	intel_de_write(display, HDCP_BKSVLO(display, cpu_transcoder, port),
		       bksv.reg[0]);
	intel_de_write(display, HDCP_BKSVHI(display, cpu_transcoder, port),
		       bksv.reg[1]);

	ret = shim->repeater_present(dig_port, &repeater_present);
	if (ret)
		return ret;
	if (repeater_present)
		intel_de_write(display, HDCP_REP_CTL,
			       intel_hdcp_get_repeater_ctl(display, cpu_transcoder, port));

	ret = shim->toggle_signalling(dig_port, cpu_transcoder, true);
	if (ret)
		return ret;

	intel_de_write(display, HDCP_CONF(display, cpu_transcoder, port),
		       HDCP_CONF_AUTH_AND_ENC);

	/* Wait for R0 ready */
	if (wait_for(intel_de_read(display, HDCP_STATUS(display, cpu_transcoder, port)) &
		     (HDCP_STATUS_R0_READY | HDCP_STATUS_ENC), 1)) {
		drm_err(display->drm, "Timed out waiting for R0 ready\n");
		return -ETIMEDOUT;
	}

	/*
	 * Wait for R0' to become available. The spec says 100ms from Aksv, but
	 * some monitors can take longer than this. We'll set the timeout at
	 * 300ms just to be sure.
	 *
	 * On DP, there's an R0_READY bit available but no such bit
	 * exists on HDMI. Since the upper-bound is the same, we'll just do
	 * the stupid thing instead of polling on one and not the other.
	 */
	wait_remaining_ms_from_jiffies(r0_prime_gen_start, 300);

	tries = 3;

	/*
	 * DP HDCP Spec mandates the two more reattempt to read R0, incase
	 * of R0 mismatch.
	 */
	for (i = 0; i < tries; i++) {
		ri.reg = 0;
		ret = shim->read_ri_prime(dig_port, ri.shim);
		if (ret)
			return ret;
		intel_de_write(display,
			       HDCP_RPRIME(display, cpu_transcoder, port),
			       ri.reg);

		/* Wait for Ri prime match */
		if (!wait_for(intel_de_read(display, HDCP_STATUS(display, cpu_transcoder, port)) &
			      (HDCP_STATUS_RI_MATCH | HDCP_STATUS_ENC), 1))
			break;
	}

	if (i == tries) {
		drm_dbg_kms(display->drm,
			    "Timed out waiting for Ri prime match (%x)\n",
			    intel_de_read(display,
					  HDCP_STATUS(display, cpu_transcoder, port)));
		return -ETIMEDOUT;
	}

	/* Wait for encryption confirmation */
	if (intel_de_wait_for_set(display,
				  HDCP_STATUS(display, cpu_transcoder, port),
				  HDCP_STATUS_ENC,
				  HDCP_ENCRYPT_STATUS_CHANGE_TIMEOUT_MS)) {
		drm_err(display->drm, "Timed out waiting for encryption\n");
		return -ETIMEDOUT;
	}

	/* DP MST Auth Part 1 Step 2.a and Step 2.b */
	if (shim->stream_encryption) {
		ret = shim->stream_encryption(connector, true);
		if (ret) {
			drm_err(display->drm, "[CONNECTOR:%d:%s] Failed to enable HDCP 1.4 stream enc\n",
				connector->base.base.id, connector->base.name);
			return ret;
		}
		drm_dbg_kms(display->drm, "HDCP 1.4 transcoder: %s stream encrypted\n",
			    transcoder_name(hdcp->stream_transcoder));
	}

	if (repeater_present)
		return intel_hdcp_auth_downstream(connector);

	drm_dbg_kms(display->drm, "HDCP is enabled (no repeater present)\n");
	return 0;
}
 969
/*
 * Disable HDCP 1.4 for a connector: stop stream encryption (MST-aware),
 * turn off port encryption, clear the repeater control bits and disable
 * HDCP signalling.
 *
 * For MST, the port-level teardown is skipped while other connectors on
 * the same digital port still have HDCP streams active.
 *
 * Returns 0 on success or a negative error code.
 */
static int _intel_hdcp_disable(struct intel_connector *connector)
{
	struct intel_display *display = to_intel_display(connector);
	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
	struct intel_hdcp *hdcp = &connector->hdcp;
	enum port port = dig_port->base.port;
	enum transcoder cpu_transcoder = hdcp->cpu_transcoder;
	u32 repeater_ctl;
	int ret;

	drm_dbg_kms(display->drm, "[CONNECTOR:%d:%s] HDCP is being disabled...\n",
		    connector->base.base.id, connector->base.name);

	if (hdcp->shim->stream_encryption) {
		ret = hdcp->shim->stream_encryption(connector, false);
		if (ret) {
			drm_err(display->drm, "[CONNECTOR:%d:%s] Failed to disable HDCP 1.4 stream enc\n",
				connector->base.base.id, connector->base.name);
			return ret;
		}
		drm_dbg_kms(display->drm, "HDCP 1.4 transcoder: %s stream encryption disabled\n",
			    transcoder_name(hdcp->stream_transcoder));
		/*
		 * If there are other connectors on this port using HDCP,
		 * don't disable it until it disabled HDCP encryption for
		 * all connectors in MST topology.
		 */
		if (dig_port->num_hdcp_streams > 0)
			return 0;
	}

	hdcp->hdcp_encrypted = false;
	intel_de_write(display, HDCP_CONF(display, cpu_transcoder, port), 0);
	/* All status bits must drop to zero once encryption is torn down. */
	if (intel_de_wait_for_clear(display,
				    HDCP_STATUS(display, cpu_transcoder, port),
				    ~0, HDCP_ENCRYPT_STATUS_CHANGE_TIMEOUT_MS)) {
		drm_err(display->drm,
			"Failed to disable HDCP, timeout clearing status\n");
		return -ETIMEDOUT;
	}

	repeater_ctl = intel_hdcp_get_repeater_ctl(display, cpu_transcoder,
						   port);
	intel_de_rmw(display, HDCP_REP_CTL, repeater_ctl, 0);

	ret = hdcp->shim->toggle_signalling(dig_port, cpu_transcoder, false);
	if (ret) {
		drm_err(display->drm, "Failed to disable HDCP signalling\n");
		return ret;
	}

	drm_dbg_kms(display->drm, "HDCP is disabled\n");
	return 0;
}
1024
1025static int intel_hdcp1_enable(struct intel_connector *connector)
1026{
1027	struct intel_display *display = to_intel_display(connector);
1028	struct intel_hdcp *hdcp = &connector->hdcp;
1029	int i, ret, tries = 3;
1030
1031	drm_dbg_kms(display->drm, "[CONNECTOR:%d:%s] HDCP is being enabled...\n",
1032		    connector->base.base.id, connector->base.name);
1033
1034	if (!hdcp_key_loadable(display)) {
1035		drm_err(display->drm, "HDCP key Load is not possible\n");
1036		return -ENXIO;
1037	}
1038
1039	for (i = 0; i < KEY_LOAD_TRIES; i++) {
1040		ret = intel_hdcp_load_keys(display);
1041		if (!ret)
1042			break;
1043		intel_hdcp_clear_keys(display);
1044	}
1045	if (ret) {
1046		drm_err(display->drm, "Could not load HDCP keys, (%d)\n",
1047			ret);
1048		return ret;
1049	}
1050
1051	/* Incase of authentication failures, HDCP spec expects reauth. */
1052	for (i = 0; i < tries; i++) {
1053		ret = intel_hdcp_auth(connector);
1054		if (!ret) {
1055			hdcp->hdcp_encrypted = true;
1056			return 0;
1057		}
1058
1059		drm_dbg_kms(display->drm, "HDCP Auth failure (%d)\n", ret);
1060
1061		/* Ensuring HDCP encryption and signalling are stopped. */
1062		_intel_hdcp_disable(connector);
1063	}
1064
1065	drm_dbg_kms(display->drm,
1066		    "HDCP authentication failed (%d tries/%d)\n", tries, ret);
1067	return ret;
1068}
1069
/* Map an embedded struct intel_hdcp back to its owning connector. */
static struct intel_connector *intel_hdcp_to_connector(struct intel_hdcp *hdcp)
{
	return container_of(hdcp, struct intel_connector, hdcp);
}
1074
/*
 * Update the cached content-protection value for a connector, keeping
 * the per-port count of HDCP-enabled streams (MST bookkeeping) in sync.
 * When @update_property is set, schedules prop_work to mirror the value
 * into the connector's content-protection property (property updates
 * need modeset locks, hence the worker).
 *
 * Caller must hold hdcp->mutex; dig_port->hdcp_mutex must additionally
 * be held whenever the value actually changes.
 */
static void intel_hdcp_update_value(struct intel_connector *connector,
				    u64 value, bool update_property)
{
	struct intel_display *display = to_intel_display(connector);
	struct drm_i915_private *i915 = to_i915(display->drm);
	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
	struct intel_hdcp *hdcp = &connector->hdcp;

	drm_WARN_ON(display->drm, !mutex_is_locked(&hdcp->mutex));

	if (hdcp->value == value)
		return;

	drm_WARN_ON(display->drm, !mutex_is_locked(&dig_port->hdcp_mutex));

	/* Track transitions to/from ENABLED for the port's stream count. */
	if (hdcp->value == DRM_MODE_CONTENT_PROTECTION_ENABLED) {
		if (!drm_WARN_ON(display->drm, dig_port->num_hdcp_streams == 0))
			dig_port->num_hdcp_streams--;
	} else if (value == DRM_MODE_CONTENT_PROTECTION_ENABLED) {
		dig_port->num_hdcp_streams++;
	}

	hdcp->value = value;
	if (update_property) {
		/*
		 * The worker drops this reference; drop it here if the work
		 * was already queued (queue_work returned false).
		 */
		drm_connector_get(&connector->base);
		if (!queue_work(i915->unordered_wq, &hdcp->prop_work))
			drm_connector_put(&connector->base);
	}
}
1104
/*
 * Implements Part 3 of the HDCP authorization procedure: link integrity
 * check. With both the connector's hdcp mutex and the port's hdcp mutex
 * held, verify that encryption is still active and that the shim's link
 * check passes; on link failure, disable and re-enable HDCP, demoting
 * the content-protection property to DESIRED when recovery fails.
 *
 * Returns 0 if the link is healthy or was recovered, negative error
 * otherwise.
 */
static int intel_hdcp_check_link(struct intel_connector *connector)
{
	struct intel_display *display = to_intel_display(connector);
	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
	struct intel_hdcp *hdcp = &connector->hdcp;
	enum port port = dig_port->base.port;
	enum transcoder cpu_transcoder;
	int ret = 0;

	mutex_lock(&hdcp->mutex);
	mutex_lock(&dig_port->hdcp_mutex);

	cpu_transcoder = hdcp->cpu_transcoder;

	/* Check_link valid only when HDCP1.4 is enabled */
	if (hdcp->value != DRM_MODE_CONTENT_PROTECTION_ENABLED ||
	    !hdcp->hdcp_encrypted) {
		ret = -EINVAL;
		goto out;
	}

	/* HW says encryption stopped behind our back: report and demote. */
	if (drm_WARN_ON(display->drm,
			!intel_hdcp_in_use(display, cpu_transcoder, port))) {
		drm_err(display->drm,
			"[CONNECTOR:%d:%s] HDCP link stopped encryption,%x\n",
			connector->base.base.id, connector->base.name,
			intel_de_read(display, HDCP_STATUS(display, cpu_transcoder, port)));
		ret = -ENXIO;
		intel_hdcp_update_value(connector,
					DRM_MODE_CONTENT_PROTECTION_DESIRED,
					true);
		goto out;
	}

	/* Link still good: re-assert ENABLED (unless userspace undesired it). */
	if (hdcp->shim->check_link(dig_port, connector)) {
		if (hdcp->value != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) {
			intel_hdcp_update_value(connector,
				DRM_MODE_CONTENT_PROTECTION_ENABLED, true);
		}
		goto out;
	}

	drm_dbg_kms(display->drm,
		    "[CONNECTOR:%d:%s] HDCP link failed, retrying authentication\n",
		    connector->base.base.id, connector->base.name);

	ret = _intel_hdcp_disable(connector);
	if (ret) {
		drm_err(display->drm, "Failed to disable hdcp (%d)\n", ret);
		intel_hdcp_update_value(connector,
					DRM_MODE_CONTENT_PROTECTION_DESIRED,
					true);
		goto out;
	}

	ret = intel_hdcp1_enable(connector);
	if (ret) {
		drm_err(display->drm, "Failed to enable hdcp (%d)\n", ret);
		intel_hdcp_update_value(connector,
					DRM_MODE_CONTENT_PROTECTION_DESIRED,
					true);
		goto out;
	}

out:
	mutex_unlock(&dig_port->hdcp_mutex);
	mutex_unlock(&hdcp->mutex);
	return ret;
}
1175
/*
 * Deferred worker that mirrors hdcp->value into the connector's
 * content-protection property. Runs outside the check/enable paths
 * because the property update needs connection_mutex (taken here before
 * hdcp->mutex). Drops the connector reference taken when the work was
 * queued in intel_hdcp_update_value().
 */
static void intel_hdcp_prop_work(struct work_struct *work)
{
	struct intel_hdcp *hdcp = container_of(work, struct intel_hdcp,
					       prop_work);
	struct intel_connector *connector = intel_hdcp_to_connector(hdcp);
	struct intel_display *display = to_intel_display(connector);

	drm_modeset_lock(&display->drm->mode_config.connection_mutex, NULL);
	mutex_lock(&hdcp->mutex);

	/*
	 * This worker is only used to flip between ENABLED/DESIRED. Either of
	 * those to UNDESIRED is handled by core. If value == UNDESIRED,
	 * we're running just after hdcp has been disabled, so just exit
	 */
	if (hdcp->value != DRM_MODE_CONTENT_PROTECTION_UNDESIRED)
		drm_hdcp_update_content_protection(&connector->base,
						   hdcp->value);

	mutex_unlock(&hdcp->mutex);
	drm_modeset_unlock(&display->drm->mode_config.connection_mutex);

	drm_connector_put(&connector->base);
}
1200
1201bool is_hdcp_supported(struct intel_display *display, enum port port)
1202{
1203	return DISPLAY_RUNTIME_INFO(display)->has_hdcp &&
1204		(DISPLAY_VER(display) >= 12 || port < PORT_E);
1205}
1206
1207static int
1208hdcp2_prepare_ake_init(struct intel_connector *connector,
1209		       struct hdcp2_ake_init *ake_data)
1210{
1211	struct intel_display *display = to_intel_display(connector);
1212	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
1213	struct hdcp_port_data *data = &dig_port->hdcp_port_data;
1214	struct i915_hdcp_arbiter *arbiter;
 
1215	int ret;
1216
1217	mutex_lock(&display->hdcp.hdcp_mutex);
1218	arbiter = display->hdcp.arbiter;
1219
1220	if (!arbiter || !arbiter->ops) {
1221		mutex_unlock(&display->hdcp.hdcp_mutex);
1222		return -EINVAL;
1223	}
1224
1225	ret = arbiter->ops->initiate_hdcp2_session(arbiter->hdcp_dev, data, ake_data);
1226	if (ret)
1227		drm_dbg_kms(display->drm, "Prepare_ake_init failed. %d\n",
1228			    ret);
1229	mutex_unlock(&display->hdcp.hdcp_mutex);
1230
1231	return ret;
1232}
1233
1234static int
1235hdcp2_verify_rx_cert_prepare_km(struct intel_connector *connector,
1236				struct hdcp2_ake_send_cert *rx_cert,
1237				bool *paired,
1238				struct hdcp2_ake_no_stored_km *ek_pub_km,
1239				size_t *msg_sz)
1240{
1241	struct intel_display *display = to_intel_display(connector);
1242	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
1243	struct hdcp_port_data *data = &dig_port->hdcp_port_data;
1244	struct i915_hdcp_arbiter *arbiter;
 
1245	int ret;
1246
1247	mutex_lock(&display->hdcp.hdcp_mutex);
1248	arbiter = display->hdcp.arbiter;
1249
1250	if (!arbiter || !arbiter->ops) {
1251		mutex_unlock(&display->hdcp.hdcp_mutex);
1252		return -EINVAL;
1253	}
1254
1255	ret = arbiter->ops->verify_receiver_cert_prepare_km(arbiter->hdcp_dev, data,
1256							 rx_cert, paired,
1257							 ek_pub_km, msg_sz);
1258	if (ret < 0)
1259		drm_dbg_kms(display->drm, "Verify rx_cert failed. %d\n",
1260			    ret);
1261	mutex_unlock(&display->hdcp.hdcp_mutex);
1262
1263	return ret;
1264}
1265
1266static int hdcp2_verify_hprime(struct intel_connector *connector,
1267			       struct hdcp2_ake_send_hprime *rx_hprime)
1268{
1269	struct intel_display *display = to_intel_display(connector);
1270	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
1271	struct hdcp_port_data *data = &dig_port->hdcp_port_data;
1272	struct i915_hdcp_arbiter *arbiter;
 
1273	int ret;
1274
1275	mutex_lock(&display->hdcp.hdcp_mutex);
1276	arbiter = display->hdcp.arbiter;
1277
1278	if (!arbiter || !arbiter->ops) {
1279		mutex_unlock(&display->hdcp.hdcp_mutex);
1280		return -EINVAL;
1281	}
1282
1283	ret = arbiter->ops->verify_hprime(arbiter->hdcp_dev, data, rx_hprime);
1284	if (ret < 0)
1285		drm_dbg_kms(display->drm, "Verify hprime failed. %d\n", ret);
1286	mutex_unlock(&display->hdcp.hdcp_mutex);
1287
1288	return ret;
1289}
1290
1291static int
1292hdcp2_store_pairing_info(struct intel_connector *connector,
1293			 struct hdcp2_ake_send_pairing_info *pairing_info)
1294{
1295	struct intel_display *display = to_intel_display(connector);
1296	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
1297	struct hdcp_port_data *data = &dig_port->hdcp_port_data;
1298	struct i915_hdcp_arbiter *arbiter;
 
1299	int ret;
1300
1301	mutex_lock(&display->hdcp.hdcp_mutex);
1302	arbiter = display->hdcp.arbiter;
1303
1304	if (!arbiter || !arbiter->ops) {
1305		mutex_unlock(&display->hdcp.hdcp_mutex);
1306		return -EINVAL;
1307	}
1308
1309	ret = arbiter->ops->store_pairing_info(arbiter->hdcp_dev, data, pairing_info);
1310	if (ret < 0)
1311		drm_dbg_kms(display->drm, "Store pairing info failed. %d\n",
1312			    ret);
1313	mutex_unlock(&display->hdcp.hdcp_mutex);
1314
1315	return ret;
1316}
1317
1318static int
1319hdcp2_prepare_lc_init(struct intel_connector *connector,
1320		      struct hdcp2_lc_init *lc_init)
1321{
1322	struct intel_display *display = to_intel_display(connector);
1323	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
1324	struct hdcp_port_data *data = &dig_port->hdcp_port_data;
1325	struct i915_hdcp_arbiter *arbiter;
 
1326	int ret;
1327
1328	mutex_lock(&display->hdcp.hdcp_mutex);
1329	arbiter = display->hdcp.arbiter;
1330
1331	if (!arbiter || !arbiter->ops) {
1332		mutex_unlock(&display->hdcp.hdcp_mutex);
1333		return -EINVAL;
1334	}
1335
1336	ret = arbiter->ops->initiate_locality_check(arbiter->hdcp_dev, data, lc_init);
1337	if (ret < 0)
1338		drm_dbg_kms(display->drm, "Prepare lc_init failed. %d\n",
1339			    ret);
1340	mutex_unlock(&display->hdcp.hdcp_mutex);
1341
1342	return ret;
1343}
1344
1345static int
1346hdcp2_verify_lprime(struct intel_connector *connector,
1347		    struct hdcp2_lc_send_lprime *rx_lprime)
1348{
1349	struct intel_display *display = to_intel_display(connector);
1350	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
1351	struct hdcp_port_data *data = &dig_port->hdcp_port_data;
1352	struct i915_hdcp_arbiter *arbiter;
 
1353	int ret;
1354
1355	mutex_lock(&display->hdcp.hdcp_mutex);
1356	arbiter = display->hdcp.arbiter;
1357
1358	if (!arbiter || !arbiter->ops) {
1359		mutex_unlock(&display->hdcp.hdcp_mutex);
1360		return -EINVAL;
1361	}
1362
1363	ret = arbiter->ops->verify_lprime(arbiter->hdcp_dev, data, rx_lprime);
1364	if (ret < 0)
1365		drm_dbg_kms(display->drm, "Verify L_Prime failed. %d\n",
1366			    ret);
1367	mutex_unlock(&display->hdcp.hdcp_mutex);
1368
1369	return ret;
1370}
1371
1372static int hdcp2_prepare_skey(struct intel_connector *connector,
1373			      struct hdcp2_ske_send_eks *ske_data)
1374{
1375	struct intel_display *display = to_intel_display(connector);
1376	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
1377	struct hdcp_port_data *data = &dig_port->hdcp_port_data;
1378	struct i915_hdcp_arbiter *arbiter;
 
1379	int ret;
1380
1381	mutex_lock(&display->hdcp.hdcp_mutex);
1382	arbiter = display->hdcp.arbiter;
1383
1384	if (!arbiter || !arbiter->ops) {
1385		mutex_unlock(&display->hdcp.hdcp_mutex);
1386		return -EINVAL;
1387	}
1388
1389	ret = arbiter->ops->get_session_key(arbiter->hdcp_dev, data, ske_data);
1390	if (ret < 0)
1391		drm_dbg_kms(display->drm, "Get session key failed. %d\n",
1392			    ret);
1393	mutex_unlock(&display->hdcp.hdcp_mutex);
1394
1395	return ret;
1396}
1397
1398static int
1399hdcp2_verify_rep_topology_prepare_ack(struct intel_connector *connector,
1400				      struct hdcp2_rep_send_receiverid_list
1401								*rep_topology,
1402				      struct hdcp2_rep_send_ack *rep_send_ack)
1403{
1404	struct intel_display *display = to_intel_display(connector);
1405	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
1406	struct hdcp_port_data *data = &dig_port->hdcp_port_data;
1407	struct i915_hdcp_arbiter *arbiter;
 
1408	int ret;
1409
1410	mutex_lock(&display->hdcp.hdcp_mutex);
1411	arbiter = display->hdcp.arbiter;
1412
1413	if (!arbiter || !arbiter->ops) {
1414		mutex_unlock(&display->hdcp.hdcp_mutex);
1415		return -EINVAL;
1416	}
1417
1418	ret = arbiter->ops->repeater_check_flow_prepare_ack(arbiter->hdcp_dev,
1419							    data,
1420							    rep_topology,
1421							    rep_send_ack);
1422	if (ret < 0)
1423		drm_dbg_kms(display->drm,
1424			    "Verify rep topology failed. %d\n", ret);
1425	mutex_unlock(&display->hdcp.hdcp_mutex);
1426
1427	return ret;
1428}
1429
1430static int
1431hdcp2_verify_mprime(struct intel_connector *connector,
1432		    struct hdcp2_rep_stream_ready *stream_ready)
1433{
1434	struct intel_display *display = to_intel_display(connector);
1435	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
1436	struct hdcp_port_data *data = &dig_port->hdcp_port_data;
1437	struct i915_hdcp_arbiter *arbiter;
 
1438	int ret;
1439
1440	mutex_lock(&display->hdcp.hdcp_mutex);
1441	arbiter = display->hdcp.arbiter;
1442
1443	if (!arbiter || !arbiter->ops) {
1444		mutex_unlock(&display->hdcp.hdcp_mutex);
1445		return -EINVAL;
1446	}
1447
1448	ret = arbiter->ops->verify_mprime(arbiter->hdcp_dev, data, stream_ready);
1449	if (ret < 0)
1450		drm_dbg_kms(display->drm, "Verify mprime failed. %d\n", ret);
1451	mutex_unlock(&display->hdcp.hdcp_mutex);
1452
1453	return ret;
1454}
1455
1456static int hdcp2_authenticate_port(struct intel_connector *connector)
1457{
1458	struct intel_display *display = to_intel_display(connector);
1459	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
1460	struct hdcp_port_data *data = &dig_port->hdcp_port_data;
1461	struct i915_hdcp_arbiter *arbiter;
 
1462	int ret;
1463
1464	mutex_lock(&display->hdcp.hdcp_mutex);
1465	arbiter = display->hdcp.arbiter;
1466
1467	if (!arbiter || !arbiter->ops) {
1468		mutex_unlock(&display->hdcp.hdcp_mutex);
1469		return -EINVAL;
1470	}
1471
1472	ret = arbiter->ops->enable_hdcp_authentication(arbiter->hdcp_dev, data);
1473	if (ret < 0)
1474		drm_dbg_kms(display->drm, "Enable hdcp auth failed. %d\n",
1475			    ret);
1476	mutex_unlock(&display->hdcp.hdcp_mutex);
1477
1478	return ret;
1479}
1480
1481static int hdcp2_close_session(struct intel_connector *connector)
1482{
1483	struct intel_display *display = to_intel_display(connector);
1484	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
1485	struct i915_hdcp_arbiter *arbiter;
 
1486	int ret;
1487
1488	mutex_lock(&display->hdcp.hdcp_mutex);
1489	arbiter = display->hdcp.arbiter;
1490
1491	if (!arbiter || !arbiter->ops) {
1492		mutex_unlock(&display->hdcp.hdcp_mutex);
1493		return -EINVAL;
1494	}
1495
1496	ret = arbiter->ops->close_hdcp_session(arbiter->hdcp_dev,
1497					     &dig_port->hdcp_port_data);
1498	mutex_unlock(&display->hdcp.hdcp_mutex);
1499
1500	return ret;
1501}
1502
/*
 * Deauthentication is just a firmware session close; kept as a separate
 * wrapper so call sites read as "deauthenticate".
 */
static int hdcp2_deauthenticate_port(struct intel_connector *connector)
{
	return hdcp2_close_session(connector);
}
1507
/* Authentication flow starts from here */
/*
 * Authentication and Key Exchange (AKE): exchange AKE_Init/AKE_Send_Cert
 * with the sink, verify the receiver certificate and H', and store
 * pairing info when the sink is not yet paired.
 * Returns 0 on success, negative error code otherwise.
 */
static int hdcp2_authentication_key_exchange(struct intel_connector *connector)
{
	struct intel_display *display = to_intel_display(connector);
	struct intel_digital_port *dig_port =
		intel_attached_dig_port(connector);
	struct intel_hdcp *hdcp = &connector->hdcp;
	/* All AKE messages share one buffer; only one is live at a time. */
	union {
		struct hdcp2_ake_init ake_init;
		struct hdcp2_ake_send_cert send_cert;
		struct hdcp2_ake_no_stored_km no_stored_km;
		struct hdcp2_ake_send_hprime send_hprime;
		struct hdcp2_ake_send_pairing_info pairing_info;
	} msgs;
	const struct intel_hdcp_shim *shim = hdcp->shim;
	size_t size;
	int ret, i, max_retries;

	/* Init for seq_num */
	hdcp->seq_num_v = 0;
	hdcp->seq_num_m = 0;

	/* Only DP/DP-MST gets the long retry budget; see the comment below. */
	if (intel_encoder_is_dp(&dig_port->base) ||
	    intel_encoder_is_mst(&dig_port->base))
		max_retries = 10;
	else
		max_retries = 1;

	ret = hdcp2_prepare_ake_init(connector, &msgs.ake_init);
	if (ret < 0)
		return ret;

	/*
	 * Retry the first read and write to downstream at least 10 times
	 * with a 50ms delay if not hdcp2 capable for DP/DPMST encoders
	 * (dock decides to stop advertising hdcp2 capability for some reason).
	 * The reason being that during suspend resume dock usually keeps the
	 * HDCP2 registers inaccesible causing AUX error. This wouldn't be a
	 * big problem if the userspace just kept retrying with some delay while
	 * it continues to play low value content but most userpace applications
	 * end up throwing an error when it receives one from KMD. This makes
	 * sure we give the dock and the sink devices to complete its power cycle
	 * and then try HDCP authentication. The values of 10 and delay of 50ms
	 * was decided based on multiple trial and errors.
	 */
	for (i = 0; i < max_retries; i++) {
		if (!intel_hdcp2_get_capability(connector)) {
			msleep(50);
			continue;
		}

		ret = shim->write_2_2_msg(connector, &msgs.ake_init,
					  sizeof(msgs.ake_init));
		if (ret < 0)
			continue;

		ret = shim->read_2_2_msg(connector, HDCP_2_2_AKE_SEND_CERT,
					 &msgs.send_cert, sizeof(msgs.send_cert));
		if (ret > 0)
			break;
	}

	/*
	 * NOTE(review): if the sink never reports HDCP2 capability above, we
	 * fall through with ret == 0 (from hdcp2_prepare_ake_init) and
	 * msgs.send_cert left unwritten; the rx_caps check below then reads
	 * indeterminate stack data. Also the "ret > 0" break depends on the
	 * shim read_2_2_msg return convention — confirm both separately.
	 */
	if (ret < 0)
		return ret;

	if (msgs.send_cert.rx_caps[0] != HDCP_2_2_RX_CAPS_VERSION_VAL) {
		drm_dbg_kms(display->drm, "cert.rx_caps dont claim HDCP2.2\n");
		return -EINVAL;
	}

	hdcp->is_repeater = HDCP_2_2_RX_REPEATER(msgs.send_cert.rx_caps[2]);

	/* Reject sinks whose receiver ID is on the SRM revocation list. */
	if (drm_hdcp_check_ksvs_revoked(display->drm,
					msgs.send_cert.cert_rx.receiver_id,
					1) > 0) {
		drm_err(display->drm, "Receiver ID is revoked\n");
		return -EPERM;
	}

	/*
	 * Here msgs.no_stored_km will hold msgs corresponding to the km
	 * stored also.
	 */
	ret = hdcp2_verify_rx_cert_prepare_km(connector, &msgs.send_cert,
					      &hdcp->is_paired,
					      &msgs.no_stored_km, &size);
	if (ret < 0)
		return ret;

	ret = shim->write_2_2_msg(connector, &msgs.no_stored_km, size);
	if (ret < 0)
		return ret;

	ret = shim->read_2_2_msg(connector, HDCP_2_2_AKE_SEND_HPRIME,
				 &msgs.send_hprime, sizeof(msgs.send_hprime));
	if (ret < 0)
		return ret;

	ret = hdcp2_verify_hprime(connector, &msgs.send_hprime);
	if (ret < 0)
		return ret;

	if (!hdcp->is_paired) {
		/* Pairing is required */
		ret = shim->read_2_2_msg(connector,
					 HDCP_2_2_AKE_SEND_PAIRING_INFO,
					 &msgs.pairing_info,
					 sizeof(msgs.pairing_info));
		if (ret < 0)
			return ret;

		ret = hdcp2_store_pairing_info(connector, &msgs.pairing_info);
		if (ret < 0)
			return ret;
		hdcp->is_paired = true;
	}

	return 0;
}
1627
/*
 * Locality Check (LC): send LC_Init, read the sink's L' and have the
 * firmware verify it. The whole exchange is retried up to
 * HDCP2_LC_RETRY_CNT times; any step failing restarts the sequence.
 * Returns 0 on success, otherwise the last step's error.
 */
static int hdcp2_locality_check(struct intel_connector *connector)
{
	struct intel_hdcp *hdcp = &connector->hdcp;
	/* LC messages share one buffer; only one is live at a time. */
	union {
		struct hdcp2_lc_init lc_init;
		struct hdcp2_lc_send_lprime send_lprime;
	} msgs;
	const struct intel_hdcp_shim *shim = hdcp->shim;
	int tries = HDCP2_LC_RETRY_CNT, ret, i;

	for (i = 0; i < tries; i++) {
		ret = hdcp2_prepare_lc_init(connector, &msgs.lc_init);
		if (ret < 0)
			continue;

		ret = shim->write_2_2_msg(connector, &msgs.lc_init,
				      sizeof(msgs.lc_init));
		if (ret < 0)
			continue;

		ret = shim->read_2_2_msg(connector,
					 HDCP_2_2_LC_SEND_LPRIME,
					 &msgs.send_lprime,
					 sizeof(msgs.send_lprime));
		if (ret < 0)
			continue;

		ret = hdcp2_verify_lprime(connector, &msgs.send_lprime);
		if (!ret)
			break;
	}

	return ret;
}
1662
1663static int hdcp2_session_key_exchange(struct intel_connector *connector)
1664{
 
1665	struct intel_hdcp *hdcp = &connector->hdcp;
1666	struct hdcp2_ske_send_eks send_eks;
1667	int ret;
1668
1669	ret = hdcp2_prepare_skey(connector, &send_eks);
1670	if (ret < 0)
1671		return ret;
1672
1673	ret = hdcp->shim->write_2_2_msg(connector, &send_eks,
1674					sizeof(send_eks));
1675	if (ret < 0)
1676		return ret;
1677
1678	return 0;
1679}
1680
/*
 * One attempt at RepeaterAuth_Stream_Manage / Stream_Ready: tell the
 * repeater which streams (and stream types) are transmitted and verify
 * its M' response. seq_num_m is incremented on every attempt, success or
 * not, as required by the HDCP 2.2 spec.
 * Returns 0 on success, negative error code otherwise.
 */
static
int _hdcp2_propagate_stream_management_info(struct intel_connector *connector)
{
	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
	struct hdcp_port_data *data = &dig_port->hdcp_port_data;
	struct intel_hdcp *hdcp = &connector->hdcp;
	/* Stream manage/ready messages share one buffer. */
	union {
		struct hdcp2_rep_stream_manage stream_manage;
		struct hdcp2_rep_stream_ready stream_ready;
	} msgs;
	const struct intel_hdcp_shim *shim = hdcp->shim;
	int ret, streams_size_delta, i;

	/* seq_num_m must never wrap; the caller re-authenticates instead. */
	if (connector->hdcp.seq_num_m > HDCP_2_2_SEQ_NUM_MAX)
		return -ERANGE;

	/* Prepare RepeaterAuth_Stream_Manage msg */
	msgs.stream_manage.msg_id = HDCP_2_2_REP_STREAM_MANAGE;
	drm_hdcp_cpu_to_be24(msgs.stream_manage.seq_num_m, hdcp->seq_num_m);

	/* k = number of streams, big-endian on the wire */
	msgs.stream_manage.k = cpu_to_be16(data->k);

	for (i = 0; i < data->k; i++) {
		msgs.stream_manage.streams[i].stream_id = data->streams[i].stream_id;
		msgs.stream_manage.streams[i].stream_type = data->streams[i].stream_type;
	}

	/* Only transmit the k populated stream slots, not the full array. */
	streams_size_delta = (HDCP_2_2_MAX_CONTENT_STREAMS_CNT - data->k) *
				sizeof(struct hdcp2_streamid_type);
	/* Send it to Repeater */
	ret = shim->write_2_2_msg(connector, &msgs.stream_manage,
				  sizeof(msgs.stream_manage) - streams_size_delta);
	if (ret < 0)
		goto out;

	ret = shim->read_2_2_msg(connector, HDCP_2_2_REP_STREAM_READY,
				 &msgs.stream_ready, sizeof(msgs.stream_ready));
	if (ret < 0)
		goto out;

	/* Firmware needs the seq_num_m this M' was computed against. */
	data->seq_num_m = hdcp->seq_num_m;

	ret = hdcp2_verify_mprime(connector, &msgs.stream_ready);

out:
	/* Incremented even on failure, per spec. */
	hdcp->seq_num_m++;

	return ret;
}
1730
/*
 * Repeater authentication: read the ReceiverID_List, sanity-check the
 * topology (depth/device limits, type 1 capability, seq_num_v monotonicity,
 * SRM revocation), have the firmware verify V' and send the ack downstream.
 * Returns 0 on success, negative error code otherwise.
 */
static
int hdcp2_authenticate_repeater_topology(struct intel_connector *connector)
{
	struct intel_display *display = to_intel_display(connector);
	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
	struct intel_hdcp *hdcp = &connector->hdcp;
	/* RecvID list and ack share one buffer; only one is live at a time. */
	union {
		struct hdcp2_rep_send_receiverid_list recvid_list;
		struct hdcp2_rep_send_ack rep_ack;
	} msgs;
	const struct intel_hdcp_shim *shim = hdcp->shim;
	u32 seq_num_v, device_cnt;
	u8 *rx_info;
	int ret;

	ret = shim->read_2_2_msg(connector, HDCP_2_2_REP_SEND_RECVID_LIST,
				 &msgs.recvid_list, sizeof(msgs.recvid_list));
	if (ret < 0)
		return ret;

	rx_info = msgs.recvid_list.rx_info;

	/* Spec limits: max cascade depth and max downstream device count. */
	if (HDCP_2_2_MAX_CASCADE_EXCEEDED(rx_info[1]) ||
	    HDCP_2_2_MAX_DEVS_EXCEEDED(rx_info[1])) {
		drm_dbg_kms(display->drm, "Topology Max Size Exceeded\n");
		return -EINVAL;
	}

	/*
	 * MST topology is not Type 1 capable if it contains a downstream
	 * device that is only HDCP 1.x or Legacy HDCP 2.0/2.1 compliant.
	 */
	dig_port->hdcp_mst_type1_capable =
		!HDCP_2_2_HDCP1_DEVICE_CONNECTED(rx_info[1]) &&
		!HDCP_2_2_HDCP_2_0_REP_CONNECTED(rx_info[1]);

	/* Type 1 content cannot flow through a non-type-1-capable topology. */
	if (!dig_port->hdcp_mst_type1_capable && hdcp->content_type) {
		drm_dbg_kms(display->drm,
			    "HDCP1.x or 2.0 Legacy Device Downstream\n");
		return -EINVAL;
	}

	/* Converting and Storing the seq_num_v to local variable as DWORD */
	seq_num_v =
		drm_hdcp_be24_to_cpu((const u8 *)msgs.recvid_list.seq_num_v);

	/* First list after enabling encryption must carry seq_num_v == 0. */
	if (!hdcp->hdcp2_encrypted && seq_num_v) {
		drm_dbg_kms(display->drm,
			    "Non zero Seq_num_v at first RecvId_List msg\n");
		return -EINVAL;
	}

	if (seq_num_v < hdcp->seq_num_v) {
		/* Roll over of the seq_num_v from repeater. Reauthenticate. */
		drm_dbg_kms(display->drm, "Seq_num_v roll over.\n");
		return -EINVAL;
	}

	/* Device count is split across the two rx_info bytes. */
	device_cnt = (HDCP_2_2_DEV_COUNT_HI(rx_info[0]) << 4 |
		      HDCP_2_2_DEV_COUNT_LO(rx_info[1]));
	if (drm_hdcp_check_ksvs_revoked(display->drm,
					msgs.recvid_list.receiver_ids,
					device_cnt) > 0) {
		drm_err(display->drm, "Revoked receiver ID(s) is in list\n");
		return -EPERM;
	}

	ret = hdcp2_verify_rep_topology_prepare_ack(connector,
						    &msgs.recvid_list,
						    &msgs.rep_ack);
	if (ret < 0)
		return ret;

	/* Only commit seq_num_v once the list verified successfully. */
	hdcp->seq_num_v = seq_num_v;
	ret = shim->write_2_2_msg(connector, &msgs.rep_ack,
				  sizeof(msgs.rep_ack));
	if (ret < 0)
		return ret;

	return 0;
}
1812
/*
 * Full HDCP 2.2 sink authentication: AKE, locality check, session key
 * exchange, optional stream-type configuration and, for repeaters,
 * topology authentication.
 * Returns 0 on success, negative error code otherwise.
 */
static int hdcp2_authenticate_sink(struct intel_connector *connector)
{
	struct intel_display *display = to_intel_display(connector);
	struct intel_hdcp *hdcp = &connector->hdcp;
	const struct intel_hdcp_shim *shim = hdcp->shim;
	int ret;

	ret = hdcp2_authentication_key_exchange(connector);
	if (ret < 0) {
		drm_dbg_kms(display->drm, "AKE Failed. Err : %d\n", ret);
		return ret;
	}

	ret = hdcp2_locality_check(connector);
	if (ret < 0) {
		drm_dbg_kms(display->drm,
			    "Locality Check failed. Err : %d\n", ret);
		return ret;
	}

	ret = hdcp2_session_key_exchange(connector);
	if (ret < 0) {
		drm_dbg_kms(display->drm, "SKE Failed. Err : %d\n", ret);
		return ret;
	}

	/* Optional shim hook (e.g. HDMI stream type signalling). */
	if (shim->config_stream_type) {
		ret = shim->config_stream_type(connector,
					       hdcp->is_repeater,
					       hdcp->content_type);
		if (ret < 0)
			return ret;
	}

	if (hdcp->is_repeater) {
		ret = hdcp2_authenticate_repeater_topology(connector);
		if (ret < 0) {
			drm_dbg_kms(display->drm,
				    "Repeater Auth Failed. Err: %d\n", ret);
			return ret;
		}
	}

	return ret;
}
1858
/*
 * Enable per-stream HDCP 2.2 encryption once link encryption is already
 * up. If the link is unexpectedly not encrypted, deauthenticate the port
 * and reset the stream bookkeeping so a fresh auth can run.
 * Returns 0 on success, negative error code otherwise.
 */
static int hdcp2_enable_stream_encryption(struct intel_connector *connector)
{
	struct intel_display *display = to_intel_display(connector);
	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
	struct hdcp_port_data *data = &dig_port->hdcp_port_data;
	struct intel_hdcp *hdcp = &connector->hdcp;
	enum transcoder cpu_transcoder = hdcp->cpu_transcoder;
	enum port port = dig_port->base.port;
	int ret = 0;

	/* Stream encryption is only valid on top of an encrypted link. */
	if (!(intel_de_read(display, HDCP2_STATUS(display, cpu_transcoder, port)) &
			    LINK_ENCRYPTION_STATUS)) {
		drm_err(display->drm, "[CONNECTOR:%d:%s] HDCP 2.2 Link is not encrypted\n",
			connector->base.base.id, connector->base.name);
		ret = -EPERM;
		goto link_recover;
	}

	/* Optional shim hook: per-stream enc used by DP MST. */
	if (hdcp->shim->stream_2_2_encryption) {
		ret = hdcp->shim->stream_2_2_encryption(connector, true);
		if (ret) {
			drm_err(display->drm, "[CONNECTOR:%d:%s] Failed to enable HDCP 2.2 stream enc\n",
				connector->base.base.id, connector->base.name);
			return ret;
		}
		drm_dbg_kms(display->drm, "HDCP 2.2 transcoder: %s stream encrypted\n",
			    transcoder_name(hdcp->stream_transcoder));
	}

	return 0;

link_recover:
	if (hdcp2_deauthenticate_port(connector) < 0)
		drm_dbg_kms(display->drm, "Port deauth failed.\n");

	/* Force a full re-auth next time; drop the queued stream count. */
	dig_port->hdcp_auth_status = false;
	data->k = 0;

	return ret;
}
1899
/*
 * Request HDCP 2.2 link encryption in hardware and wait for the status
 * bit to latch. Expects the port to be authenticated but not yet
 * encrypted.
 * Returns 0 on success, negative error code on timeout/failure.
 */
static int hdcp2_enable_encryption(struct intel_connector *connector)
{
	struct intel_display *display = to_intel_display(connector);
	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
	struct intel_hdcp *hdcp = &connector->hdcp;
	enum port port = dig_port->base.port;
	enum transcoder cpu_transcoder = hdcp->cpu_transcoder;
	int ret;

	/* Encryption must not already be on when we get here. */
	drm_WARN_ON(display->drm,
		    intel_de_read(display, HDCP2_STATUS(display, cpu_transcoder, port)) &
		    LINK_ENCRYPTION_STATUS);
	/* Optional shim hook (e.g. HDMI HDCP signalling enable). */
	if (hdcp->shim->toggle_signalling) {
		ret = hdcp->shim->toggle_signalling(dig_port, cpu_transcoder,
						    true);
		if (ret) {
			drm_err(display->drm,
				"Failed to enable HDCP signalling. %d\n",
				ret);
			return ret;
		}
	}

	if (intel_de_read(display, HDCP2_STATUS(display, cpu_transcoder, port)) &
	    LINK_AUTH_STATUS)
		/* Link is Authenticated. Now set for Encryption */
		intel_de_rmw(display, HDCP2_CTL(display, cpu_transcoder, port),
			     0, CTL_LINK_ENCRYPTION_REQ);

	ret = intel_de_wait_for_set(display,
				    HDCP2_STATUS(display, cpu_transcoder,
						 port),
				    LINK_ENCRYPTION_STATUS,
				    HDCP_ENCRYPT_STATUS_CHANGE_TIMEOUT_MS);
	/* Marked even on timeout; callers tear down via deauth on error. */
	dig_port->hdcp_auth_status = true;

	return ret;
}
1938
/*
 * Drop the HDCP 2.2 link encryption request, wait for the status bit to
 * clear, then disable HDCP signalling via the shim if provided.
 * Returns 0 on success, negative error code otherwise.
 */
static int hdcp2_disable_encryption(struct intel_connector *connector)
{
	struct intel_display *display = to_intel_display(connector);
	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
	struct intel_hdcp *hdcp = &connector->hdcp;
	enum port port = dig_port->base.port;
	enum transcoder cpu_transcoder = hdcp->cpu_transcoder;
	int ret;

	/* Encryption is expected to be on when we get here. */
	drm_WARN_ON(display->drm,
		    !(intel_de_read(display, HDCP2_STATUS(display, cpu_transcoder, port)) &
				    LINK_ENCRYPTION_STATUS));

	intel_de_rmw(display, HDCP2_CTL(display, cpu_transcoder, port),
		     CTL_LINK_ENCRYPTION_REQ, 0);

	ret = intel_de_wait_for_clear(display,
				      HDCP2_STATUS(display, cpu_transcoder,
						   port),
				      LINK_ENCRYPTION_STATUS,
				      HDCP_ENCRYPT_STATUS_CHANGE_TIMEOUT_MS);
	/* Timeout is logged but does not abort the signalling teardown. */
	if (ret == -ETIMEDOUT)
		drm_dbg_kms(display->drm, "Disable Encryption Timedout");

	if (hdcp->shim->toggle_signalling) {
		ret = hdcp->shim->toggle_signalling(dig_port, cpu_transcoder,
						    false);
		if (ret) {
			drm_err(display->drm,
				"Failed to disable HDCP signalling. %d\n",
				ret);
			return ret;
		}
	}

	return ret;
}
1976
/*
 * Retry wrapper around _hdcp2_propagate_stream_management_info(): up to
 * three attempts, aborting early when seq_num_m has rolled over (which
 * requires a full re-authentication instead). No-op for non-repeaters.
 * Returns 0 on success, otherwise the last attempt's error.
 */
static int
hdcp2_propagate_stream_management_info(struct intel_connector *connector)
{
	struct intel_display *display = to_intel_display(connector);
	int i, tries = 3, ret;

	if (!connector->hdcp.is_repeater)
		return 0;

	for (i = 0; i < tries; i++) {
		ret = _hdcp2_propagate_stream_management_info(connector);
		if (!ret)
			break;

		/* Lets restart the auth incase of seq_num_m roll over */
		if (connector->hdcp.seq_num_m > HDCP_2_2_SEQ_NUM_MAX) {
			drm_dbg_kms(display->drm,
				    "seq_num_m roll over.(%d)\n", ret);
			break;
		}

		drm_dbg_kms(display->drm,
			    "HDCP2 stream management %d of %d Failed.(%d)\n",
			    i + 1, tries, ret);
	}

	return ret;
}
2005
/*
 * Top-level HDCP 2.2 bring-up: authenticate the sink (up to 3 tries,
 * tearing down the fw session between attempts), then enable link and
 * stream encryption. If the port is already authenticated (MST sibling
 * stream), skip straight to stream encryption.
 * Returns 0 on success, negative error code otherwise.
 */
static int hdcp2_authenticate_and_encrypt(struct intel_atomic_state *state,
					  struct intel_connector *connector)
{
	struct intel_display *display = to_intel_display(connector);
	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
	int ret = 0, i, tries = 3;

	for (i = 0; i < tries && !dig_port->hdcp_auth_status; i++) {
		ret = hdcp2_authenticate_sink(connector);
		if (!ret) {
			ret = intel_hdcp_prepare_streams(state, connector);
			if (ret) {
				drm_dbg_kms(display->drm,
					    "Prepare stream failed.(%d)\n",
					    ret);
				break;
			}

			ret = hdcp2_propagate_stream_management_info(connector);
			if (ret) {
				drm_dbg_kms(display->drm,
					    "Stream management failed.(%d)\n",
					    ret);
				break;
			}

			ret = hdcp2_authenticate_port(connector);
			if (!ret)
				break;
			drm_dbg_kms(display->drm, "HDCP2 port auth failed.(%d)\n",
				    ret);
		}

		/* Clearing the mei hdcp session */
		drm_dbg_kms(display->drm, "HDCP2.2 Auth %d of %d Failed.(%d)\n",
			    i + 1, tries, ret);
		if (hdcp2_deauthenticate_port(connector) < 0)
			drm_dbg_kms(display->drm, "Port deauth failed.\n");
	}

	if (!ret && !dig_port->hdcp_auth_status) {
		/*
		 * Ensuring the required 200mSec min time interval between
		 * Session Key Exchange and encryption.
		 */
		msleep(HDCP_2_2_DELAY_BEFORE_ENCRYPTION_EN);
		ret = hdcp2_enable_encryption(connector);
		if (ret < 0) {
			drm_dbg_kms(display->drm,
				    "Encryption Enable Failed.(%d)\n", ret);
			if (hdcp2_deauthenticate_port(connector) < 0)
				drm_dbg_kms(display->drm, "Port deauth failed.\n");
		}
	}

	if (!ret)
		ret = hdcp2_enable_stream_encryption(connector);

	return ret;
}
2066
/*
 * Enable HDCP 2.2 for a connector: disable line rekeying (hardware
 * workaround), run the full authenticate-and-encrypt sequence, and mark
 * the connector encrypted on success.
 * Returns 0 on success, negative error code otherwise.
 */
static int _intel_hdcp2_enable(struct intel_atomic_state *state,
			       struct intel_connector *connector)
{
	struct intel_display *display = to_intel_display(connector);
	struct intel_hdcp *hdcp = &connector->hdcp;
	int ret;

	drm_dbg_kms(display->drm, "[CONNECTOR:%d:%s] HDCP2.2 is being enabled. Type: %d\n",
		    connector->base.base.id, connector->base.name,
		    hdcp->content_type);

	/* Line rekeying must be off before HDCP 2.2 authentication. */
	intel_hdcp_disable_hdcp_line_rekeying(connector->encoder, hdcp);

	ret = hdcp2_authenticate_and_encrypt(state, connector);
	if (ret) {
		drm_dbg_kms(display->drm, "HDCP2 Type%d  Enabling Failed. (%d)\n",
			    hdcp->content_type, ret);
		return ret;
	}

	drm_dbg_kms(display->drm, "[CONNECTOR:%d:%s] HDCP2.2 is enabled. Type %d\n",
		    connector->base.base.id, connector->base.name,
		    hdcp->content_type);

	hdcp->hdcp2_encrypted = true;
	return 0;
}
2094
/*
 * Disable HDCP 2.2 for a connector: tear down stream encryption first
 * (bailing early on MST when sibling streams remain, unless recovering a
 * failed link), then link encryption and the fw session, and reset the
 * per-port auth bookkeeping.
 * Returns 0 on success, negative error code otherwise.
 */
static int
_intel_hdcp2_disable(struct intel_connector *connector, bool hdcp2_link_recovery)
{
	struct intel_display *display = to_intel_display(connector);
	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
	struct hdcp_port_data *data = &dig_port->hdcp_port_data;
	struct intel_hdcp *hdcp = &connector->hdcp;
	int ret;

	drm_dbg_kms(display->drm, "[CONNECTOR:%d:%s] HDCP2.2 is being Disabled\n",
		    connector->base.base.id, connector->base.name);

	if (hdcp->shim->stream_2_2_encryption) {
		ret = hdcp->shim->stream_2_2_encryption(connector, false);
		if (ret) {
			drm_err(display->drm, "[CONNECTOR:%d:%s] Failed to disable HDCP 2.2 stream enc\n",
				connector->base.base.id, connector->base.name);
			return ret;
		}
		drm_dbg_kms(display->drm, "HDCP 2.2 transcoder: %s stream encryption disabled\n",
			    transcoder_name(hdcp->stream_transcoder));

		/* Other MST streams still use the link; keep it encrypted. */
		if (dig_port->num_hdcp_streams > 0 && !hdcp2_link_recovery)
			return 0;
	}

	ret = hdcp2_disable_encryption(connector);

	if (hdcp2_deauthenticate_port(connector) < 0)
		drm_dbg_kms(display->drm, "Port deauth failed.\n");

	connector->hdcp.hdcp2_encrypted = false;
	dig_port->hdcp_auth_status = false;
	data->k = 0;

	return ret;
}
2132
/* Implements the Link Integrity Check for HDCP2.2 */
/*
 * Periodic HDCP 2.2 link check. Verifies that hardware encryption is
 * still active and asks the shim about link/topology state. On topology
 * change, only the repeater topology is re-authenticated; on link
 * failure, HDCP is disabled and the property falls back to DESIRED so the
 * property work re-enables it.
 * Returns 0 when the link is fine, negative error code otherwise.
 */
static int intel_hdcp2_check_link(struct intel_connector *connector)
{
	struct intel_display *display = to_intel_display(connector);
	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
	struct intel_hdcp *hdcp = &connector->hdcp;
	enum port port = dig_port->base.port;
	enum transcoder cpu_transcoder;
	int ret = 0;

	/* Lock order: connector hdcp mutex, then the shared port mutex. */
	mutex_lock(&hdcp->mutex);
	mutex_lock(&dig_port->hdcp_mutex);
	cpu_transcoder = hdcp->cpu_transcoder;

	/* hdcp2_check_link is expected only when HDCP2.2 is Enabled */
	if (hdcp->value != DRM_MODE_CONTENT_PROTECTION_ENABLED ||
	    !hdcp->hdcp2_encrypted) {
		ret = -EINVAL;
		goto out;
	}

	/* Hardware dropped encryption behind our back: recover via disable. */
	if (drm_WARN_ON(display->drm,
			!intel_hdcp2_in_use(display, cpu_transcoder, port))) {
		drm_err(display->drm,
			"HDCP2.2 link stopped the encryption, %x\n",
			intel_de_read(display, HDCP2_STATUS(display, cpu_transcoder, port)));
		ret = -ENXIO;
		_intel_hdcp2_disable(connector, true);
		intel_hdcp_update_value(connector,
					DRM_MODE_CONTENT_PROTECTION_DESIRED,
					true);
		goto out;
	}

	ret = hdcp->shim->check_2_2_link(dig_port, connector);
	if (ret == HDCP_LINK_PROTECTED) {
		if (hdcp->value != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) {
			intel_hdcp_update_value(connector,
					DRM_MODE_CONTENT_PROTECTION_ENABLED,
					true);
		}
		goto out;
	}

	if (ret == HDCP_TOPOLOGY_CHANGE) {
		if (hdcp->value == DRM_MODE_CONTENT_PROTECTION_UNDESIRED)
			goto out;

		drm_dbg_kms(display->drm,
			    "HDCP2.2 Downstream topology change\n");

		/* Re-verify just the repeater topology before giving up. */
		ret = hdcp2_authenticate_repeater_topology(connector);
		if (!ret) {
			intel_hdcp_update_value(connector,
						DRM_MODE_CONTENT_PROTECTION_ENABLED,
						true);
			goto out;
		}

		drm_dbg_kms(display->drm,
			    "[CONNECTOR:%d:%s] Repeater topology auth failed.(%d)\n",
			    connector->base.base.id, connector->base.name,
			    ret);
	} else {
		drm_dbg_kms(display->drm,
			    "[CONNECTOR:%d:%s] HDCP2.2 link failed, retrying auth\n",
			    connector->base.base.id, connector->base.name);
	}

	/* Full teardown; DESIRED triggers re-enable from the property work. */
	ret = _intel_hdcp2_disable(connector, true);
	if (ret) {
		drm_err(display->drm,
			"[CONNECTOR:%d:%s] Failed to disable hdcp2.2 (%d)\n",
			connector->base.base.id, connector->base.name, ret);
		intel_hdcp_update_value(connector,
				DRM_MODE_CONTENT_PROTECTION_DESIRED, true);
		goto out;
	}

	intel_hdcp_update_value(connector,
				DRM_MODE_CONTENT_PROTECTION_DESIRED, true);
out:
	mutex_unlock(&dig_port->hdcp_mutex);
	mutex_unlock(&hdcp->mutex);
	return ret;
}
2219
/*
 * Delayed-work handler driving the periodic link checks. Tries HDCP 2.2
 * first, then HDCP 1.4; whichever reports a healthy link re-arms the work
 * with its own check period. Stops silently once the connector is gone.
 */
static void intel_hdcp_check_work(struct work_struct *work)
{
	struct intel_hdcp *hdcp = container_of(to_delayed_work(work),
					       struct intel_hdcp,
					       check_work);
	struct intel_connector *connector = intel_hdcp_to_connector(hdcp);
	struct intel_display *display = to_intel_display(connector);
	struct drm_i915_private *i915 = to_i915(display->drm);

	if (drm_connector_is_unregistered(&connector->base))
		return;

	/* A zero return means "link OK, keep polling" for either version. */
	if (!intel_hdcp2_check_link(connector))
		queue_delayed_work(i915->unordered_wq, &hdcp->check_work,
				   DRM_HDCP2_CHECK_PERIOD_MS);
	else if (!intel_hdcp_check_link(connector))
		queue_delayed_work(i915->unordered_wq, &hdcp->check_work,
				   DRM_HDCP_CHECK_PERIOD_MS);
}
2239
2240static int i915_hdcp_component_bind(struct device *drv_kdev,
2241				    struct device *mei_kdev, void *data)
2242{
2243	struct intel_display *display = to_intel_display(drv_kdev);
2244
2245	drm_dbg(display->drm, "I915 HDCP comp bind\n");
2246	mutex_lock(&display->hdcp.hdcp_mutex);
2247	display->hdcp.arbiter = (struct i915_hdcp_arbiter *)data;
2248	display->hdcp.arbiter->hdcp_dev = mei_kdev;
2249	mutex_unlock(&display->hdcp.hdcp_mutex);
2250
2251	return 0;
2252}
2253
/*
 * Component-framework unbind callback: retract the arbiter pointer under
 * hdcp_mutex so in-flight firmware calls either complete first or see
 * NULL and bail with -EINVAL.
 */
static void i915_hdcp_component_unbind(struct device *drv_kdev,
				       struct device *mei_kdev, void *data)
{
	struct intel_display *display = to_intel_display(drv_kdev);

	drm_dbg(display->drm, "I915 HDCP comp unbind\n");
	mutex_lock(&display->hdcp.hdcp_mutex);
	display->hdcp.arbiter = NULL;
	mutex_unlock(&display->hdcp.hdcp_mutex);
}
2264
/* Component ops registered with the MEI HDCP component master. */
static const struct component_ops i915_hdcp_ops = {
	.bind   = i915_hdcp_component_bind,
	.unbind = i915_hdcp_component_unbind,
};
2269
2270static enum hdcp_ddi intel_get_hdcp_ddi_index(enum port port)
2271{
2272	switch (port) {
2273	case PORT_A:
2274		return HDCP_DDI_A;
2275	case PORT_B ... PORT_F:
2276		return (enum hdcp_ddi)port;
2277	default:
2278		return HDCP_DDI_INVALID_PORT;
2279	}
2280}
2281
2282static enum hdcp_transcoder intel_get_hdcp_transcoder(enum transcoder cpu_transcoder)
2283{
2284	switch (cpu_transcoder) {
2285	case TRANSCODER_A ... TRANSCODER_D:
2286		return (enum hdcp_transcoder)(cpu_transcoder | 0x10);
2287	default: /* eDP, DSI TRANSCODERS are non HDCP capable */
2288		return HDCP_INVALID_TRANSCODER;
2289	}
2290}
2291
/*
 * Populate the per-port data handed to the HDCP arbiter firmware: DDI
 * index (pre-Gen12 only), port type, protocol, and the per-pipe stream
 * array (allocated once, reused across re-inits).
 * Returns 0 on success, -ENOMEM if the stream array allocation fails.
 */
static int initialize_hdcp_port_data(struct intel_connector *connector,
				     struct intel_digital_port *dig_port,
				     const struct intel_hdcp_shim *shim)
{
	struct intel_display *display = to_intel_display(connector);
	struct hdcp_port_data *data = &dig_port->hdcp_port_data;
	enum port port = dig_port->base.port;

	if (DISPLAY_VER(display) < 12)
		data->hdcp_ddi = intel_get_hdcp_ddi_index(port);
	else
		/*
		 * As per ME FW API expectation, for GEN 12+, hdcp_ddi is filled
		 * with zero(INVALID PORT index).
		 */
		data->hdcp_ddi = HDCP_DDI_INVALID_PORT;

	/*
	 * As associated transcoder is set and modified at modeset, here hdcp_transcoder
	 * is initialized to zero (invalid transcoder index). This will be
	 * retained for <Gen12 forever.
	 */
	data->hdcp_transcoder = HDCP_INVALID_TRANSCODER;

	data->port_type = (u8)HDCP_PORT_TYPE_INTEGRATED;
	data->protocol = (u8)shim->protocol;

	/* One stream slot per pipe; kept allocated for the port's lifetime. */
	if (!data->streams)
		data->streams = kcalloc(INTEL_NUM_PIPES(display),
					sizeof(struct hdcp2_streamid_type),
					GFP_KERNEL);
	if (!data->streams) {
		drm_err(display->drm, "Out of Memory\n");
		return -ENOMEM;
	}

	return 0;
}
2330
2331static bool is_hdcp2_supported(struct intel_display *display)
2332{
2333	struct drm_i915_private *i915 = to_i915(display->drm);
2334
2335	if (intel_hdcp_gsc_cs_required(display))
2336		return true;
2337
2338	if (!IS_ENABLED(CONFIG_INTEL_MEI_HDCP))
2339		return false;
2340
2341	return (DISPLAY_VER(display) >= 10 ||
2342		IS_KABYLAKE(i915) ||
2343		IS_COFFEELAKE(i915) ||
2344		IS_COMETLAKE(i915));
2345}
2346
2347void intel_hdcp_component_init(struct intel_display *display)
2348{
2349	int ret;
2350
2351	if (!is_hdcp2_supported(display))
2352		return;
2353
2354	mutex_lock(&display->hdcp.hdcp_mutex);
2355	drm_WARN_ON(display->drm, display->hdcp.comp_added);
2356
2357	display->hdcp.comp_added = true;
2358	mutex_unlock(&display->hdcp.hdcp_mutex);
2359	if (intel_hdcp_gsc_cs_required(display))
2360		ret = intel_hdcp_gsc_init(display);
2361	else
2362		ret = component_add_typed(display->drm->dev, &i915_hdcp_ops,
2363					  I915_COMPONENT_HDCP);
2364
 
 
 
 
2365	if (ret < 0) {
2366		drm_dbg_kms(display->drm, "Failed at fw component add(%d)\n",
2367			    ret);
2368		mutex_lock(&display->hdcp.hdcp_mutex);
2369		display->hdcp.comp_added = false;
2370		mutex_unlock(&display->hdcp.hdcp_mutex);
2371		return;
2372	}
2373}
2374
2375static void intel_hdcp2_init(struct intel_connector *connector,
2376			     struct intel_digital_port *dig_port,
2377			     const struct intel_hdcp_shim *shim)
2378{
2379	struct intel_display *display = to_intel_display(connector);
2380	struct intel_hdcp *hdcp = &connector->hdcp;
2381	int ret;
2382
2383	ret = initialize_hdcp_port_data(connector, dig_port, shim);
2384	if (ret) {
2385		drm_dbg_kms(display->drm, "Mei hdcp data init failed\n");
2386		return;
2387	}
2388
2389	hdcp->hdcp2_supported = true;
2390}
2391
2392int intel_hdcp_init(struct intel_connector *connector,
2393		    struct intel_digital_port *dig_port,
2394		    const struct intel_hdcp_shim *shim)
2395{
2396	struct intel_display *display = to_intel_display(connector);
2397	struct intel_hdcp *hdcp = &connector->hdcp;
2398	int ret;
2399
2400	if (!shim)
2401		return -EINVAL;
2402
2403	if (is_hdcp2_supported(display))
2404		intel_hdcp2_init(connector, dig_port, shim);
2405
2406	ret = drm_connector_attach_content_protection_property(&connector->base,
2407							       hdcp->hdcp2_supported);
 
2408	if (ret) {
2409		hdcp->hdcp2_supported = false;
2410		kfree(dig_port->hdcp_port_data.streams);
2411		return ret;
2412	}
2413
2414	hdcp->shim = shim;
2415	mutex_init(&hdcp->mutex);
2416	INIT_DELAYED_WORK(&hdcp->check_work, intel_hdcp_check_work);
2417	INIT_WORK(&hdcp->prop_work, intel_hdcp_prop_work);
2418	init_waitqueue_head(&hdcp->cp_irq_queue);
2419
2420	return 0;
2421}
2422
/*
 * Authenticate and enable HDCP on a connector. HDCP2.2 is attempted first
 * when the sink is capable; if that fails and the requested content type is
 * not Type 1, HDCP1.4 is tried as a fallback. On success the periodic link
 * check worker is scheduled and the CP property is moved to ENABLED.
 *
 * Returns 0 on success, -ENOENT if no shim is set, -ENODEV if the connector
 * has no encoder, or the error from the last enable attempt.
 */
static int _intel_hdcp_enable(struct intel_atomic_state *state,
			      struct intel_encoder *encoder,
			      const struct intel_crtc_state *pipe_config,
			      const struct drm_connector_state *conn_state)
{
	struct intel_display *display = to_intel_display(encoder);
	struct drm_i915_private *i915 = to_i915(display->drm);
	struct intel_connector *connector =
		to_intel_connector(conn_state->connector);
	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
	struct intel_hdcp *hdcp = &connector->hdcp;
	unsigned long check_link_interval = DRM_HDCP_CHECK_PERIOD_MS;
	int ret = -EINVAL;

	if (!hdcp->shim)
		return -ENOENT;

	if (!connector->encoder) {
		drm_err(display->drm, "[CONNECTOR:%d:%s] encoder is not initialized\n",
			connector->base.base.id, connector->base.name);
		return -ENODEV;
	}

	/* Lock order: per-connector hdcp mutex first, then the port mutex */
	mutex_lock(&hdcp->mutex);
	mutex_lock(&dig_port->hdcp_mutex);
	drm_WARN_ON(display->drm,
		    hdcp->value == DRM_MODE_CONTENT_PROTECTION_ENABLED);
	hdcp->content_type = (u8)conn_state->hdcp_content_type;

	/* For MST, HDCP runs on the master transcoder of the topology */
	if (intel_crtc_has_type(pipe_config, INTEL_OUTPUT_DP_MST)) {
		hdcp->cpu_transcoder = pipe_config->mst_master_transcoder;
		hdcp->stream_transcoder = pipe_config->cpu_transcoder;
	} else {
		hdcp->cpu_transcoder = pipe_config->cpu_transcoder;
		hdcp->stream_transcoder = INVALID_TRANSCODER;
	}

	/* Gen12+ firmware needs the transcoder index set at enable time */
	if (DISPLAY_VER(display) >= 12)
		dig_port->hdcp_port_data.hdcp_transcoder =
			intel_get_hdcp_transcoder(hdcp->cpu_transcoder);

	/*
	 * Considering that HDCP2.2 is more secure than HDCP1.4, If the setup
	 * is capable of HDCP2.2, it is preferred to use HDCP2.2.
	 */
	if (intel_hdcp2_get_capability(connector)) {
		ret = _intel_hdcp2_enable(state, connector);
		if (!ret)
			check_link_interval =
				DRM_HDCP2_CHECK_PERIOD_MS;
	}

	/*
	 * When HDCP2.2 fails and Content Type is not Type1, HDCP1.4 will
	 * be attempted.
	 */
	if (ret && intel_hdcp_get_capability(connector) &&
	    hdcp->content_type != DRM_MODE_HDCP_CONTENT_TYPE1) {
		ret = intel_hdcp1_enable(connector);
	}

	/* On success, start periodic link checks and publish ENABLED */
	if (!ret) {
		queue_delayed_work(i915->unordered_wq, &hdcp->check_work,
				   check_link_interval);
		intel_hdcp_update_value(connector,
					DRM_MODE_CONTENT_PROTECTION_ENABLED,
					true);
	}

	mutex_unlock(&dig_port->hdcp_mutex);
	mutex_unlock(&hdcp->mutex);
	return ret;
}
2496
2497void intel_hdcp_enable(struct intel_atomic_state *state,
2498		       struct intel_encoder *encoder,
2499		       const struct intel_crtc_state *crtc_state,
2500		       const struct drm_connector_state *conn_state)
2501{
2502	struct intel_connector *connector =
2503		to_intel_connector(conn_state->connector);
2504	struct intel_hdcp *hdcp = &connector->hdcp;
2505
2506	/*
2507	 * Enable hdcp if it's desired or if userspace is enabled and
2508	 * driver set its state to undesired
2509	 */
2510	if (conn_state->content_protection ==
2511	    DRM_MODE_CONTENT_PROTECTION_DESIRED ||
2512	    (conn_state->content_protection ==
2513	    DRM_MODE_CONTENT_PROTECTION_ENABLED && hdcp->value ==
2514	    DRM_MODE_CONTENT_PROTECTION_UNDESIRED))
2515		_intel_hdcp_enable(state, encoder, crtc_state, conn_state);
2516}
2517
2518int intel_hdcp_disable(struct intel_connector *connector)
2519{
2520	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
2521	struct intel_hdcp *hdcp = &connector->hdcp;
2522	int ret = 0;
2523
2524	if (!hdcp->shim)
2525		return -ENOENT;
2526
2527	mutex_lock(&hdcp->mutex);
2528	mutex_lock(&dig_port->hdcp_mutex);
2529
2530	if (hdcp->value == DRM_MODE_CONTENT_PROTECTION_UNDESIRED)
2531		goto out;
2532
2533	intel_hdcp_update_value(connector,
2534				DRM_MODE_CONTENT_PROTECTION_UNDESIRED, false);
2535	if (hdcp->hdcp2_encrypted)
2536		ret = _intel_hdcp2_disable(connector, false);
2537	else if (hdcp->hdcp_encrypted)
2538		ret = _intel_hdcp_disable(connector);
2539
2540out:
2541	mutex_unlock(&dig_port->hdcp_mutex);
2542	mutex_unlock(&hdcp->mutex);
2543	cancel_delayed_work_sync(&hdcp->check_work);
2544	return ret;
2545}
2546
/*
 * Re-evaluate HDCP state for a connector across a modeset/commit: disable
 * when userspace went UNDESIRED or changed the content type, re-enable when
 * DESIRED (or after a type change), and schedule prop_work so userspace
 * sees the correct CP property value.
 */
void intel_hdcp_update_pipe(struct intel_atomic_state *state,
			    struct intel_encoder *encoder,
			    const struct intel_crtc_state *crtc_state,
			    const struct drm_connector_state *conn_state)
{
	struct intel_connector *connector =
				to_intel_connector(conn_state->connector);
	struct intel_hdcp *hdcp = &connector->hdcp;
	bool content_protection_type_changed, desired_and_not_enabled = false;
	struct drm_i915_private *i915 = to_i915(connector->base.dev);

	if (!connector->hdcp.shim)
		return;

	/* A type change only matters while protection is still requested */
	content_protection_type_changed =
		(conn_state->hdcp_content_type != hdcp->content_type &&
		 conn_state->content_protection !=
		 DRM_MODE_CONTENT_PROTECTION_UNDESIRED);

	/*
	 * During the HDCP encryption session if Type change is requested,
	 * disable the HDCP and reenable it with new TYPE value.
	 */
	if (conn_state->content_protection ==
	    DRM_MODE_CONTENT_PROTECTION_UNDESIRED ||
	    content_protection_type_changed)
		intel_hdcp_disable(connector);

	/*
	 * Mark the hdcp state as DESIRED after the hdcp disable of type
	 * change procedure.
	 */
	if (content_protection_type_changed) {
		mutex_lock(&hdcp->mutex);
		hdcp->value = DRM_MODE_CONTENT_PROTECTION_DESIRED;
		/* Reference dropped by prop_work, or here if queueing failed */
		drm_connector_get(&connector->base);
		if (!queue_work(i915->unordered_wq, &hdcp->prop_work))
			drm_connector_put(&connector->base);
		mutex_unlock(&hdcp->mutex);
	}

	if (conn_state->content_protection ==
	    DRM_MODE_CONTENT_PROTECTION_DESIRED) {
		mutex_lock(&hdcp->mutex);
		/* Avoid enabling hdcp, if it already ENABLED */
		desired_and_not_enabled =
			hdcp->value != DRM_MODE_CONTENT_PROTECTION_ENABLED;
		mutex_unlock(&hdcp->mutex);
		/*
		 * If HDCP already ENABLED and CP property is DESIRED, schedule
		 * prop_work to update correct CP property to user space.
		 */
		if (!desired_and_not_enabled && !content_protection_type_changed) {
			drm_connector_get(&connector->base);
			if (!queue_work(i915->unordered_wq, &hdcp->prop_work))
				drm_connector_put(&connector->base);

		}
	}

	/* Re-enable after a type-change disable, or when newly desired */
	if (desired_and_not_enabled || content_protection_type_changed)
		_intel_hdcp_enable(state, encoder, crtc_state, conn_state);


}
2610
2611void intel_hdcp_component_fini(struct intel_display *display)
2612{
2613	mutex_lock(&display->hdcp.hdcp_mutex);
2614	if (!display->hdcp.comp_added) {
2615		mutex_unlock(&display->hdcp.hdcp_mutex);
2616		return;
2617	}
2618
2619	display->hdcp.comp_added = false;
2620	mutex_unlock(&display->hdcp.hdcp_mutex);
2621
2622	if (intel_hdcp_gsc_cs_required(display))
2623		intel_hdcp_gsc_fini(display);
2624	else
2625		component_del(display->drm->dev, &i915_hdcp_ops);
2626}
2627
2628void intel_hdcp_cleanup(struct intel_connector *connector)
2629{
2630	struct intel_hdcp *hdcp = &connector->hdcp;
2631
2632	if (!hdcp->shim)
2633		return;
2634
2635	/*
2636	 * If the connector is registered, it's possible userspace could kick
2637	 * off another HDCP enable, which would re-spawn the workers.
2638	 */
2639	drm_WARN_ON(connector->base.dev,
2640		connector->base.registration_state == DRM_CONNECTOR_REGISTERED);
2641
2642	/*
2643	 * Now that the connector is not registered, check_work won't be run,
2644	 * but cancel any outstanding instances of it
2645	 */
2646	cancel_delayed_work_sync(&hdcp->check_work);
2647
2648	/*
2649	 * We don't cancel prop_work in the same way as check_work since it
2650	 * requires connection_mutex which could be held while calling this
2651	 * function. Instead, we rely on the connector references grabbed before
2652	 * scheduling prop_work to ensure the connector is alive when prop_work
2653	 * is run. So if we're in the destroy path (which is where this
2654	 * function should be called), we're "guaranteed" that prop_work is not
2655	 * active (tl;dr This Should Never Happen).
2656	 */
2657	drm_WARN_ON(connector->base.dev, work_pending(&hdcp->prop_work));
2658
2659	mutex_lock(&hdcp->mutex);
2660	hdcp->shim = NULL;
2661	mutex_unlock(&hdcp->mutex);
2662}
2663
2664void intel_hdcp_atomic_check(struct drm_connector *connector,
2665			     struct drm_connector_state *old_state,
2666			     struct drm_connector_state *new_state)
2667{
2668	u64 old_cp = old_state->content_protection;
2669	u64 new_cp = new_state->content_protection;
2670	struct drm_crtc_state *crtc_state;
2671
2672	if (!new_state->crtc) {
2673		/*
2674		 * If the connector is being disabled with CP enabled, mark it
2675		 * desired so it's re-enabled when the connector is brought back
2676		 */
2677		if (old_cp == DRM_MODE_CONTENT_PROTECTION_ENABLED)
2678			new_state->content_protection =
2679				DRM_MODE_CONTENT_PROTECTION_DESIRED;
2680		return;
2681	}
2682
2683	crtc_state = drm_atomic_get_new_crtc_state(new_state->state,
2684						   new_state->crtc);
2685	/*
2686	 * Fix the HDCP uapi content protection state in case of modeset.
2687	 * FIXME: As per HDCP content protection property uapi doc, an uevent()
2688	 * need to be sent if there is transition from ENABLED->DESIRED.
2689	 */
2690	if (drm_atomic_crtc_needs_modeset(crtc_state) &&
2691	    (old_cp == DRM_MODE_CONTENT_PROTECTION_ENABLED &&
2692	    new_cp != DRM_MODE_CONTENT_PROTECTION_UNDESIRED))
2693		new_state->content_protection =
2694			DRM_MODE_CONTENT_PROTECTION_DESIRED;
2695
2696	/*
2697	 * Nothing to do if the state didn't change, or HDCP was activated since
2698	 * the last commit. And also no change in hdcp content type.
2699	 */
2700	if (old_cp == new_cp ||
2701	    (old_cp == DRM_MODE_CONTENT_PROTECTION_DESIRED &&
2702	     new_cp == DRM_MODE_CONTENT_PROTECTION_ENABLED)) {
2703		if (old_state->hdcp_content_type ==
2704				new_state->hdcp_content_type)
2705			return;
2706	}
2707
2708	crtc_state->mode_changed = true;
2709}
2710
2711/* Handles the CP_IRQ raised from the DP HDCP sink */
2712void intel_hdcp_handle_cp_irq(struct intel_connector *connector)
2713{
2714	struct intel_hdcp *hdcp = &connector->hdcp;
2715	struct intel_display *display = to_intel_display(connector);
2716	struct drm_i915_private *i915 = to_i915(display->drm);
2717
2718	if (!hdcp->shim)
2719		return;
2720
2721	atomic_inc(&connector->hdcp.cp_irq_count);
2722	wake_up_all(&connector->hdcp.cp_irq_queue);
2723
2724	queue_delayed_work(i915->unordered_wq, &hdcp->check_work, 0);
2725}