Linux Audio

Check our new training course

Loading...
v6.13.7
   1/* SPDX-License-Identifier: MIT */
   2/*
   3 * Copyright (C) 2017 Google, Inc.
 * Copyright © 2017-2019, Intel Corporation.
   5 *
   6 * Authors:
   7 * Sean Paul <seanpaul@chromium.org>
   8 * Ramalingam C <ramalingam.c@intel.com>
   9 */
  10
  11#include <linux/component.h>
  12#include <linux/i2c.h>
  13#include <linux/random.h>
  14
  15#include <drm/display/drm_hdcp_helper.h>
  16#include <drm/intel/i915_component.h>
  17
  18#include "i915_drv.h"
  19#include "i915_reg.h"
  20#include "intel_connector.h"
  21#include "intel_de.h"
  22#include "intel_display_power.h"
  23#include "intel_display_power_well.h"
  24#include "intel_display_types.h"
  25#include "intel_hdcp.h"
  26#include "intel_hdcp_gsc.h"
  27#include "intel_hdcp_regs.h"
  28#include "intel_hdcp_shim.h"
  29#include "intel_pcode.h"
  30
  31#define KEY_LOAD_TRIES	5
 
  32#define HDCP2_LC_RETRY_CNT			3
  33
/*
 * WA: 16022217614
 *
 * Disable HDCP line rekeying on affected display steppings. Applies only to
 * HDMI outputs; the register/bit used depends on display version + stepping:
 * display 14.00 D0+ uses a chicken bit in MTL_CHICKEN_TRANS, while 14.01 B0+
 * and 20.00 B0+ moved the disable bit into TRANS_DDI_FUNC_CTL.
 */
static void
intel_hdcp_disable_hdcp_line_rekeying(struct intel_encoder *encoder,
				      struct intel_hdcp *hdcp)
{
	struct intel_display *display = to_intel_display(encoder);

	/*
	 * Here we assume HDMI is in TMDS mode of operation.
	 * NOTE(review): this only checks the output type, not the actual
	 * TMDS vs FRL mode — confirm the WA is wanted for both.
	 */
	if (encoder->type != INTEL_OUTPUT_HDMI)
		return;

	if (DISPLAY_VER(display) >= 14) {
		if (IS_DISPLAY_VERx100_STEP(display, 1400, STEP_D0, STEP_FOREVER))
			intel_de_rmw(display, MTL_CHICKEN_TRANS(hdcp->cpu_transcoder),
				     0, HDCP_LINE_REKEY_DISABLE);
		else if (IS_DISPLAY_VERx100_STEP(display, 1401, STEP_B0, STEP_FOREVER) ||
			 IS_DISPLAY_VERx100_STEP(display, 2000, STEP_B0, STEP_FOREVER))
			intel_de_rmw(display,
				     TRANS_DDI_FUNC_CTL(display, hdcp->cpu_transcoder),
				     0, TRANS_DDI_HDCP_LINE_REKEY_DISABLE);
	}
}
  56
  57static int intel_conn_to_vcpi(struct intel_atomic_state *state,
  58			      struct intel_connector *connector)
  59{
  60	struct drm_dp_mst_topology_mgr *mgr;
  61	struct drm_dp_mst_atomic_payload *payload;
  62	struct drm_dp_mst_topology_state *mst_state;
  63	int vcpi = 0;
  64
  65	/* For HDMI this is forced to be 0x0. For DP SST also this is 0x0. */
  66	if (!connector->port)
  67		return 0;
  68	mgr = connector->port->mgr;
  69
  70	drm_modeset_lock(&mgr->base.lock, state->base.acquire_ctx);
  71	mst_state = to_drm_dp_mst_topology_state(mgr->base.state);
  72	payload = drm_atomic_get_mst_payload_state(mst_state, connector->port);
  73	if (drm_WARN_ON(mgr->dev, !payload))
  74		goto out;
  75
  76	vcpi = payload->vcpi;
  77	if (drm_WARN_ON(mgr->dev, vcpi < 0)) {
  78		vcpi = 0;
  79		goto out;
  80	}
  81out:
  82	return vcpi;
  83}
  84
  85/*
  86 * intel_hdcp_required_content_stream selects the most highest common possible HDCP
  87 * content_type for all streams in DP MST topology because security f/w doesn't
  88 * have any provision to mark content_type for each stream separately, it marks
  89 * all available streams with the content_type proivided at the time of port
  90 * authentication. This may prohibit the userspace to use type1 content on
  91 * HDCP 2.2 capable sink because of other sink are not capable of HDCP 2.2 in
  92 * DP MST topology. Though it is not compulsory, security fw should change its
  93 * policy to mark different content_types for different streams.
  94 */
static int
intel_hdcp_required_content_stream(struct intel_atomic_state *state,
				   struct intel_digital_port *dig_port)
{
	struct intel_display *display = to_intel_display(state);
	struct drm_connector_list_iter conn_iter;
	struct intel_digital_port *conn_dig_port;
	struct intel_connector *connector;
	struct hdcp_port_data *data = &dig_port->hdcp_port_data;
	bool enforce_type0 = false;
	int k;

	/*
	 * Port already authenticated — presumably the stream list is already
	 * programmed; skip rebuilding it. TODO(review): confirm.
	 */
	if (dig_port->hdcp_auth_status)
		return 0;

	data->k = 0;

	/* One non-type1-capable sink downgrades the whole topology to type0. */
	if (!dig_port->hdcp_mst_type1_capable)
		enforce_type0 = true;

	/* Collect the stream id of every active MST stream on this port. */
	drm_connector_list_iter_begin(display->drm, &conn_iter);
	for_each_intel_connector_iter(connector, &conn_iter) {
		if (connector->base.status == connector_status_disconnected)
			continue;

		if (!intel_encoder_is_mst(intel_attached_encoder(connector)))
			continue;

		conn_dig_port = intel_attached_dig_port(connector);
		if (conn_dig_port != dig_port)
			continue;

		data->streams[data->k].stream_id =
			intel_conn_to_vcpi(state, connector);
		data->k++;

		/* if there is only one active stream */
		if (dig_port->dp.active_mst_links <= 1)
			break;
	}
	drm_connector_list_iter_end(&conn_iter);

	/* Zero streams, or more streams than pipes, is a driver state bug. */
	if (drm_WARN_ON(display->drm, data->k > INTEL_NUM_PIPES(display) || data->k == 0))
		return -EINVAL;

	/*
	 * Apply common protection level across all streams in DP MST Topology.
	 * Use highest supported content type for all streams in DP MST Topology.
	 */
	for (k = 0; k < data->k; k++)
		data->streams[k].stream_type =
			enforce_type0 ? DRM_MODE_HDCP_CONTENT_TYPE0 : DRM_MODE_HDCP_CONTENT_TYPE1;

	return 0;
}
 150
 151static int intel_hdcp_prepare_streams(struct intel_atomic_state *state,
 152				      struct intel_connector *connector)
 153{
 154	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
 155	struct hdcp_port_data *data = &dig_port->hdcp_port_data;
 156	struct intel_hdcp *hdcp = &connector->hdcp;
 157
 158	if (intel_encoder_is_mst(intel_attached_encoder(connector)))
 159		return intel_hdcp_required_content_stream(state, dig_port);
 160
 161	data->k = 1;
 162	data->streams[0].stream_id = 0;
 163	data->streams[0].stream_type = hdcp->content_type;
 164
 165	return 0;
 166}
 167
 168static
 169bool intel_hdcp_is_ksv_valid(u8 *ksv)
 170{
 171	int i, ones = 0;
 172	/* KSV has 20 1's and 20 0's */
 173	for (i = 0; i < DRM_HDCP_KSV_LEN; i++)
 174		ones += hweight8(ksv[i]);
 175	if (ones != 20)
 176		return false;
 177
 178	return true;
 179}
 180
 181static
 182int intel_hdcp_read_valid_bksv(struct intel_digital_port *dig_port,
 183			       const struct intel_hdcp_shim *shim, u8 *bksv)
 184{
 185	struct intel_display *display = to_intel_display(dig_port);
 186	int ret, i, tries = 2;
 187
 188	/* HDCP spec states that we must retry the bksv if it is invalid */
 189	for (i = 0; i < tries; i++) {
 190		ret = shim->read_bksv(dig_port, bksv);
 191		if (ret)
 192			return ret;
 193		if (intel_hdcp_is_ksv_valid(bksv))
 194			break;
 195	}
 196	if (i == tries) {
 197		drm_dbg_kms(display->drm, "Bksv is invalid\n");
 198		return -ENODEV;
 199	}
 200
 201	return 0;
 202}
 203
 204/* Is HDCP1.4 capable on Platform and Sink */
 205bool intel_hdcp_get_capability(struct intel_connector *connector)
 206{
 207	struct intel_digital_port *dig_port;
 208	const struct intel_hdcp_shim *shim = connector->hdcp.shim;
 209	bool capable = false;
 210	u8 bksv[5];
 211
 212	if (!intel_attached_encoder(connector))
 213		return capable;
 214
 215	dig_port = intel_attached_dig_port(connector);
 216
 217	if (!shim)
 218		return capable;
 219
 220	if (shim->hdcp_get_capability) {
 221		shim->hdcp_get_capability(dig_port, &capable);
 222	} else {
 223		if (!intel_hdcp_read_valid_bksv(dig_port, shim, bksv))
 224			capable = true;
 225	}
 226
 227	return capable;
 228}
 229
 230/*
 231 * Check if the source has all the building blocks ready to make
 232 * HDCP 2.2 work
 233 */
 234static bool intel_hdcp2_prerequisite(struct intel_connector *connector)
 235{
 236	struct intel_display *display = to_intel_display(connector);
 
 237	struct intel_hdcp *hdcp = &connector->hdcp;
 
 238
 239	/* I915 support for HDCP2.2 */
 240	if (!hdcp->hdcp2_supported)
 241		return false;
 242
 243	/* If MTL+ make sure gsc is loaded and proxy is setup */
 244	if (intel_hdcp_gsc_cs_required(display)) {
 245		if (!intel_hdcp_gsc_check_status(display))
 246			return false;
 247	}
 248
 249	/* MEI/GSC interface is solid depending on which is used */
 250	mutex_lock(&display->hdcp.hdcp_mutex);
 251	if (!display->hdcp.comp_added || !display->hdcp.arbiter) {
 252		mutex_unlock(&display->hdcp.hdcp_mutex);
 253		return false;
 254	}
 255	mutex_unlock(&display->hdcp.hdcp_mutex);
 256
 257	return true;
 258}
 259
 260/* Is HDCP2.2 capable on Platform and Sink */
 261bool intel_hdcp2_get_capability(struct intel_connector *connector)
 262{
 263	struct intel_hdcp *hdcp = &connector->hdcp;
 264	bool capable = false;
 265
 266	if (!intel_hdcp2_prerequisite(connector))
 267		return false;
 268
 269	/* Sink's capability for HDCP2.2 */
 270	hdcp->shim->hdcp_2_2_get_capability(connector, &capable);
 271
 272	return capable;
 273}
 274
 275void intel_hdcp_get_remote_capability(struct intel_connector *connector,
 276				      bool *hdcp_capable,
 277				      bool *hdcp2_capable)
 278{
 279	struct intel_hdcp *hdcp = &connector->hdcp;
 280
 281	if (!hdcp->shim->get_remote_hdcp_capability)
 282		return;
 283
 284	hdcp->shim->get_remote_hdcp_capability(connector, hdcp_capable,
 285					       hdcp2_capable);
 286
 287	if (!intel_hdcp2_prerequisite(connector))
 288		*hdcp2_capable = false;
 289}
 290
 291static bool intel_hdcp_in_use(struct intel_display *display,
 292			      enum transcoder cpu_transcoder, enum port port)
 293{
 294	return intel_de_read(display,
 295			     HDCP_STATUS(display, cpu_transcoder, port)) &
 296		HDCP_STATUS_ENC;
 297}
 298
 299static bool intel_hdcp2_in_use(struct intel_display *display,
 300			       enum transcoder cpu_transcoder, enum port port)
 301{
 302	return intel_de_read(display,
 303			     HDCP2_STATUS(display, cpu_transcoder, port)) &
 304		LINK_ENCRYPTION_STATUS;
 305}
 306
/*
 * Wait for the repeater to signal that its downstream KSV list is ready.
 * Returns 0 on ready, a shim error if reading the ready bit failed, or
 * -ETIMEDOUT.
 */
static int intel_hdcp_poll_ksv_fifo(struct intel_digital_port *dig_port,
				    const struct intel_hdcp_shim *shim)
{
	int ret, read_ret;
	bool ksv_ready;

	/*
	 * Poll for ksv list ready (spec says max time allowed is 5s).
	 * Timeout is 5s total; the remaining __wait_for() args are the
	 * poll-interval bounds in usecs — presumably a 1ms..100ms backoff,
	 * TODO(review): confirm against the __wait_for() definition.
	 */
	ret = __wait_for(read_ret = shim->read_ksv_ready(dig_port,
							 &ksv_ready),
			 read_ret || ksv_ready, 5 * 1000 * 1000, 1000,
			 100 * 1000);
	if (ret)
		return ret;
	/* The poll loop stops on either a read error or readiness. */
	if (read_ret)
		return read_ret;
	if (!ksv_ready)
		return -ETIMEDOUT;

	return 0;
}
 327
/*
 * Can the HDCP 1.4 keys be loaded right now? True when the relevant power
 * well for this platform is enabled (checked under a runtime-PM wakeref).
 */
static bool hdcp_key_loadable(struct intel_display *display)
{
	struct drm_i915_private *i915 = to_i915(display->drm);
	enum i915_power_well_id id;
	intel_wakeref_t wakeref;
	bool enabled = false;

	/*
	 * On HSW and BDW, Display HW loads the Key as soon as Display resumes.
	 * On all BXT+, SW can load the keys only when the PW#1 is turned on.
	 */
	if (IS_HASWELL(i915) || IS_BROADWELL(i915))
		id = HSW_DISP_PW_GLOBAL;
	else
		id = SKL_DISP_PW_1;

	/* PG1 (power well #1) needs to be enabled */
	with_intel_runtime_pm(&i915->runtime_pm, wakeref)
		enabled = intel_display_power_well_is_enabled(i915, id);

	/*
	 * Another req for hdcp key loadability is enabled state of pll for
	 * cdclk. Without active crtc we wont land here. So we are assuming that
	 * cdclk is already on.
	 */

	return enabled;
}
 356
 357static void intel_hdcp_clear_keys(struct intel_display *display)
 358{
 359	intel_de_write(display, HDCP_KEY_CONF, HDCP_CLEAR_KEYS_TRIGGER);
 360	intel_de_write(display, HDCP_KEY_STATUS,
 361		       HDCP_KEY_LOAD_DONE | HDCP_KEY_LOAD_STATUS | HDCP_FUSE_IN_PROGRESS | HDCP_FUSE_ERROR | HDCP_FUSE_DONE);
 362}
 363
/*
 * Load the HDCP 1.4 keys from fuses and hand the Aksv to the HW.
 * Returns 0 when the keys are (or already were) loaded successfully,
 * -ENXIO on a platform/key error, or the pcode/wait error.
 */
static int intel_hdcp_load_keys(struct intel_display *display)
{
	struct drm_i915_private *i915 = to_i915(display->drm);
	int ret;
	u32 val;

	/* Already loaded and valid: nothing to do. */
	val = intel_de_read(display, HDCP_KEY_STATUS);
	if ((val & HDCP_KEY_LOAD_DONE) && (val & HDCP_KEY_LOAD_STATUS))
		return 0;

	/*
	 * On HSW and BDW HW loads the HDCP1.4 Key when Display comes
	 * out of reset. So if Key is not already loaded, its an error state.
	 */
	if (IS_HASWELL(i915) || IS_BROADWELL(i915))
		if (!(intel_de_read(display, HDCP_KEY_STATUS) & HDCP_KEY_LOAD_DONE))
			return -ENXIO;

	/*
	 * Initiate loading the HDCP key from fuses.
	 *
	 * BXT+ platforms, HDCP key needs to be loaded by SW. Only display
	 * version 9 platforms (minus BXT) differ in the key load trigger
	 * process from other platforms. These platforms use the GT Driver
	 * Mailbox interface.
	 */
	if (DISPLAY_VER(display) == 9 && !IS_BROXTON(i915)) {
		ret = snb_pcode_write(&i915->uncore, SKL_PCODE_LOAD_HDCP_KEYS, 1);
		if (ret) {
			drm_err(display->drm,
				"Failed to initiate HDCP key load (%d)\n",
				ret);
			return ret;
		}
	} else {
		intel_de_write(display, HDCP_KEY_CONF, HDCP_KEY_LOAD_TRIGGER);
	}

	/*
	 * Wait for the keys to load (500us).
	 * NOTE(review): the wait args (10, 1) look like a 10us fast poll and
	 * 1ms slow timeout, not literally 500us — confirm against
	 * intel_de_wait_custom()'s parameter meaning.
	 */
	ret = intel_de_wait_custom(display, HDCP_KEY_STATUS,
				   HDCP_KEY_LOAD_DONE, HDCP_KEY_LOAD_DONE,
				   10, 1, &val);
	if (ret)
		return ret;
	else if (!(val & HDCP_KEY_LOAD_STATUS))
		/* Load finished but the keys are not usable. */
		return -ENXIO;

	/* Send Aksv over to PCH display for use in authentication */
	intel_de_write(display, HDCP_KEY_CONF, HDCP_AKSV_SEND_TRIGGER);

	return 0;
}
 416
 417/* Returns updated SHA-1 index */
 418static int intel_write_sha_text(struct intel_display *display, u32 sha_text)
 419{
 420	intel_de_write(display, HDCP_SHA_TEXT, sha_text);
 421	if (intel_de_wait_for_set(display, HDCP_REP_CTL, HDCP_SHA1_READY, 1)) {
 422		drm_err(display->drm, "Timed out waiting for SHA1 ready\n");
 423		return -ETIMEDOUT;
 424	}
 425	return 0;
 426}
 427
/*
 * Map a transcoder (display ver >= 12) or DDI port (older platforms) to the
 * matching HDCP_REP_CTL "repeater present" + "SHA1 M0 select" bits.
 * Returns 0 (and logs an error) for an unknown transcoder/port.
 */
static
u32 intel_hdcp_get_repeater_ctl(struct intel_display *display,
				enum transcoder cpu_transcoder, enum port port)
{
	/* TGL+ selects by transcoder rather than by DDI. */
	if (DISPLAY_VER(display) >= 12) {
		switch (cpu_transcoder) {
		case TRANSCODER_A:
			return HDCP_TRANSA_REP_PRESENT |
			       HDCP_TRANSA_SHA1_M0;
		case TRANSCODER_B:
			return HDCP_TRANSB_REP_PRESENT |
			       HDCP_TRANSB_SHA1_M0;
		case TRANSCODER_C:
			return HDCP_TRANSC_REP_PRESENT |
			       HDCP_TRANSC_SHA1_M0;
		case TRANSCODER_D:
			return HDCP_TRANSD_REP_PRESENT |
			       HDCP_TRANSD_SHA1_M0;
		default:
			drm_err(display->drm, "Unknown transcoder %d\n",
				cpu_transcoder);
			return 0;
		}
	}

	switch (port) {
	case PORT_A:
		return HDCP_DDIA_REP_PRESENT | HDCP_DDIA_SHA1_M0;
	case PORT_B:
		return HDCP_DDIB_REP_PRESENT | HDCP_DDIB_SHA1_M0;
	case PORT_C:
		return HDCP_DDIC_REP_PRESENT | HDCP_DDIC_SHA1_M0;
	case PORT_D:
		return HDCP_DDID_REP_PRESENT | HDCP_DDID_SHA1_M0;
	case PORT_E:
		return HDCP_DDIE_REP_PRESENT | HDCP_DDIE_SHA1_M0;
	default:
		drm_err(display->drm, "Unknown port %d\n", port);
		return 0;
	}
}
 469
/*
 * Validate the repeater's V' against a SHA-1 computed by the source HW.
 *
 * The receiver's V' parts are loaded into HDCP_SHA_V_PRIME registers, then
 * the KSV list, BINFO/BSTATUS and M0 are streamed through HDCP_SHA_TEXT in
 * 32-bit words, after which the HW is told to finish the hash and compare.
 *
 * Returns 0 when HW reports a V match; -ETIMEDOUT on a HW wait timeout,
 * -ENXIO on SHA-1 mismatch, -EINVAL on an impossible leftover count, or a
 * shim error from reading V'.
 */
static
int intel_hdcp_validate_v_prime(struct intel_connector *connector,
				const struct intel_hdcp_shim *shim,
				u8 *ksv_fifo, u8 num_downstream, u8 *bstatus)
{
	struct intel_display *display = to_intel_display(connector);
	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
	enum transcoder cpu_transcoder = connector->hdcp.cpu_transcoder;
	enum port port = dig_port->base.port;
	u32 vprime, sha_text, sha_leftovers, rep_ctl;
	int ret, i, j, sha_idx;

	/* Process V' values from the receiver */
	for (i = 0; i < DRM_HDCP_V_PRIME_NUM_PARTS; i++) {
		ret = shim->read_v_prime_part(dig_port, i, &vprime);
		if (ret)
			return ret;
		intel_de_write(display, HDCP_SHA_V_PRIME(i), vprime);
	}

	/*
	 * We need to write the concatenation of all device KSVs, BINFO (DP) ||
	 * BSTATUS (HDMI), and M0 (which is added via HDCP_REP_CTL). This byte
	 * stream is written via the HDCP_SHA_TEXT register in 32-bit
	 * increments. Every 64 bytes, we need to write HDCP_REP_CTL again. This
	 * index will keep track of our progress through the 64 bytes as well as
	 * helping us work the 40-bit KSVs through our 32-bit register.
	 *
	 * NOTE: data passed via HDCP_SHA_TEXT should be big-endian
	 */
	sha_idx = 0;
	sha_text = 0;
	sha_leftovers = 0;
	rep_ctl = intel_hdcp_get_repeater_ctl(display, cpu_transcoder, port);
	intel_de_write(display, HDCP_REP_CTL, rep_ctl | HDCP_SHA1_TEXT_32);
	for (i = 0; i < num_downstream; i++) {
		unsigned int sha_empty;
		u8 *ksv = &ksv_fifo[i * DRM_HDCP_KSV_LEN];

		/* Fill up the empty slots in sha_text and write it out */
		sha_empty = sizeof(sha_text) - sha_leftovers;
		for (j = 0; j < sha_empty; j++) {
			u8 off = ((sizeof(sha_text) - j - 1 - sha_leftovers) * 8);
			sha_text |= ksv[j] << off;
		}

		ret = intel_write_sha_text(display, sha_text);
		if (ret < 0)
			return ret;

		/* Programming guide writes this every 64 bytes */
		sha_idx += sizeof(sha_text);
		if (!(sha_idx % 64))
			intel_de_write(display, HDCP_REP_CTL,
				       rep_ctl | HDCP_SHA1_TEXT_32);

		/* Store the leftover bytes from the ksv in sha_text */
		sha_leftovers = DRM_HDCP_KSV_LEN - sha_empty;
		sha_text = 0;
		for (j = 0; j < sha_leftovers; j++)
			sha_text |= ksv[sha_empty + j] <<
					((sizeof(sha_text) - j - 1) * 8);

		/*
		 * If we still have room in sha_text for more data, continue.
		 * Otherwise, write it out immediately.
		 */
		if (sizeof(sha_text) > sha_leftovers)
			continue;

		ret = intel_write_sha_text(display, sha_text);
		if (ret < 0)
			return ret;
		sha_leftovers = 0;
		sha_text = 0;
		sha_idx += sizeof(sha_text);
	}

	/*
	 * We need to write BINFO/BSTATUS, and M0 now. Depending on how many
	 * bytes are leftover from the last ksv, we might be able to fit them
	 * all in sha_text (first 2 cases), or we might need to split them up
	 * into 2 writes (last 2 cases).
	 */
	if (sha_leftovers == 0) {
		/* Write 16 bits of text, 16 bits of M0 */
		intel_de_write(display, HDCP_REP_CTL,
			       rep_ctl | HDCP_SHA1_TEXT_16);
		ret = intel_write_sha_text(display,
					   bstatus[0] << 8 | bstatus[1]);
		if (ret < 0)
			return ret;
		sha_idx += sizeof(sha_text);

		/* Write 32 bits of M0 */
		intel_de_write(display, HDCP_REP_CTL,
			       rep_ctl | HDCP_SHA1_TEXT_0);
		ret = intel_write_sha_text(display, 0);
		if (ret < 0)
			return ret;
		sha_idx += sizeof(sha_text);

		/* Write 16 bits of M0 */
		intel_de_write(display, HDCP_REP_CTL,
			       rep_ctl | HDCP_SHA1_TEXT_16);
		ret = intel_write_sha_text(display, 0);
		if (ret < 0)
			return ret;
		sha_idx += sizeof(sha_text);

	} else if (sha_leftovers == 1) {
		/* Write 24 bits of text, 8 bits of M0 */
		intel_de_write(display, HDCP_REP_CTL,
			       rep_ctl | HDCP_SHA1_TEXT_24);
		sha_text |= bstatus[0] << 16 | bstatus[1] << 8;
		/* Only 24-bits of data, must be in the LSB */
		sha_text = (sha_text & 0xffffff00) >> 8;
		ret = intel_write_sha_text(display, sha_text);
		if (ret < 0)
			return ret;
		sha_idx += sizeof(sha_text);

		/* Write 32 bits of M0 */
		intel_de_write(display, HDCP_REP_CTL,
			       rep_ctl | HDCP_SHA1_TEXT_0);
		ret = intel_write_sha_text(display, 0);
		if (ret < 0)
			return ret;
		sha_idx += sizeof(sha_text);

		/* Write 24 bits of M0 */
		intel_de_write(display, HDCP_REP_CTL,
			       rep_ctl | HDCP_SHA1_TEXT_8);
		ret = intel_write_sha_text(display, 0);
		if (ret < 0)
			return ret;
		sha_idx += sizeof(sha_text);

	} else if (sha_leftovers == 2) {
		/* Write 32 bits of text */
		intel_de_write(display, HDCP_REP_CTL,
			       rep_ctl | HDCP_SHA1_TEXT_32);
		sha_text |= bstatus[0] << 8 | bstatus[1];
		ret = intel_write_sha_text(display, sha_text);
		if (ret < 0)
			return ret;
		sha_idx += sizeof(sha_text);

		/* Write 64 bits of M0 */
		intel_de_write(display, HDCP_REP_CTL,
			       rep_ctl | HDCP_SHA1_TEXT_0);
		for (i = 0; i < 2; i++) {
			ret = intel_write_sha_text(display, 0);
			if (ret < 0)
				return ret;
			sha_idx += sizeof(sha_text);
		}

		/*
		 * Terminate the SHA-1 stream by hand. For the other leftover
		 * cases this is appended by the hardware.
		 */
		intel_de_write(display, HDCP_REP_CTL,
			       rep_ctl | HDCP_SHA1_TEXT_32);
		sha_text = DRM_HDCP_SHA1_TERMINATOR << 24;
		ret = intel_write_sha_text(display, sha_text);
		if (ret < 0)
			return ret;
		sha_idx += sizeof(sha_text);
	} else if (sha_leftovers == 3) {
		/* Write 32 bits of text (filled from LSB) */
		intel_de_write(display, HDCP_REP_CTL,
			       rep_ctl | HDCP_SHA1_TEXT_32);
		sha_text |= bstatus[0];
		ret = intel_write_sha_text(display, sha_text);
		if (ret < 0)
			return ret;
		sha_idx += sizeof(sha_text);

		/* Write 8 bits of text (filled from LSB), 24 bits of M0 */
		intel_de_write(display, HDCP_REP_CTL,
			       rep_ctl | HDCP_SHA1_TEXT_8);
		ret = intel_write_sha_text(display, bstatus[1]);
		if (ret < 0)
			return ret;
		sha_idx += sizeof(sha_text);

		/* Write 32 bits of M0 */
		intel_de_write(display, HDCP_REP_CTL,
			       rep_ctl | HDCP_SHA1_TEXT_0);
		ret = intel_write_sha_text(display, 0);
		if (ret < 0)
			return ret;
		sha_idx += sizeof(sha_text);

		/* Write 8 bits of M0 */
		intel_de_write(display, HDCP_REP_CTL,
			       rep_ctl | HDCP_SHA1_TEXT_24);
		ret = intel_write_sha_text(display, 0);
		if (ret < 0)
			return ret;
		sha_idx += sizeof(sha_text);
	} else {
		/* sha_leftovers is in [0, 3] by construction; defensive. */
		drm_dbg_kms(display->drm, "Invalid number of leftovers %d\n",
			    sha_leftovers);
		return -EINVAL;
	}

	intel_de_write(display, HDCP_REP_CTL, rep_ctl | HDCP_SHA1_TEXT_32);
	/* Fill up to 64-4 bytes with zeros (leave the last write for length) */
	while ((sha_idx % 64) < (64 - sizeof(sha_text))) {
		ret = intel_write_sha_text(display, 0);
		if (ret < 0)
			return ret;
		sha_idx += sizeof(sha_text);
	}

	/*
	 * Last write gets the length of the concatenation in bits. That is:
	 *  - 5 bytes per device
	 *  - 10 bytes for BINFO/BSTATUS(2), M0(8)
	 */
	sha_text = (num_downstream * 5 + 10) * 8;
	ret = intel_write_sha_text(display, sha_text);
	if (ret < 0)
		return ret;

	/* Tell the HW we're done with the hash and wait for it to ACK */
	intel_de_write(display, HDCP_REP_CTL,
		       rep_ctl | HDCP_SHA1_COMPLETE_HASH);
	if (intel_de_wait_for_set(display, HDCP_REP_CTL,
				  HDCP_SHA1_COMPLETE, 1)) {
		drm_err(display->drm, "Timed out waiting for SHA1 complete\n");
		return -ETIMEDOUT;
	}
	if (!(intel_de_read(display, HDCP_REP_CTL) & HDCP_SHA1_V_MATCH)) {
		drm_dbg_kms(display->drm, "SHA-1 mismatch, HDCP failed\n");
		return -ENXIO;
	}

	return 0;
}
 712
 713/* Implements Part 2 of the HDCP authorization procedure */
 714static
 715int intel_hdcp_auth_downstream(struct intel_connector *connector)
 716{
 717	struct intel_display *display = to_intel_display(connector);
 718	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
 
 719	const struct intel_hdcp_shim *shim = connector->hdcp.shim;
 720	u8 bstatus[2], num_downstream, *ksv_fifo;
 721	int ret, i, tries = 3;
 722
 723	ret = intel_hdcp_poll_ksv_fifo(dig_port, shim);
 724	if (ret) {
 725		drm_dbg_kms(display->drm,
 726			    "KSV list failed to become ready (%d)\n", ret);
 727		return ret;
 728	}
 729
 730	ret = shim->read_bstatus(dig_port, bstatus);
 731	if (ret)
 732		return ret;
 733
 734	if (DRM_HDCP_MAX_DEVICE_EXCEEDED(bstatus[0]) ||
 735	    DRM_HDCP_MAX_CASCADE_EXCEEDED(bstatus[1])) {
 736		drm_dbg_kms(display->drm, "Max Topology Limit Exceeded\n");
 737		return -EPERM;
 738	}
 739
 740	/*
 741	 * When repeater reports 0 device count, HDCP1.4 spec allows disabling
 742	 * the HDCP encryption. That implies that repeater can't have its own
 743	 * display. As there is no consumption of encrypted content in the
 744	 * repeater with 0 downstream devices, we are failing the
 745	 * authentication.
 746	 */
 747	num_downstream = DRM_HDCP_NUM_DOWNSTREAM(bstatus[0]);
 748	if (num_downstream == 0) {
 749		drm_dbg_kms(display->drm,
 750			    "Repeater with zero downstream devices\n");
 751		return -EINVAL;
 752	}
 753
 754	ksv_fifo = kcalloc(DRM_HDCP_KSV_LEN, num_downstream, GFP_KERNEL);
 755	if (!ksv_fifo) {
 756		drm_dbg_kms(display->drm, "Out of mem: ksv_fifo\n");
 757		return -ENOMEM;
 758	}
 759
 760	ret = shim->read_ksv_fifo(dig_port, num_downstream, ksv_fifo);
 761	if (ret)
 762		goto err;
 763
 764	if (drm_hdcp_check_ksvs_revoked(display->drm, ksv_fifo,
 765					num_downstream) > 0) {
 766		drm_err(display->drm, "Revoked Ksv(s) in ksv_fifo\n");
 767		ret = -EPERM;
 768		goto err;
 769	}
 770
 771	/*
 772	 * When V prime mismatches, DP Spec mandates re-read of
 773	 * V prime atleast twice.
 774	 */
 775	for (i = 0; i < tries; i++) {
 776		ret = intel_hdcp_validate_v_prime(connector, shim,
 777						  ksv_fifo, num_downstream,
 778						  bstatus);
 779		if (!ret)
 780			break;
 781	}
 782
 783	if (i == tries) {
 784		drm_dbg_kms(display->drm,
 785			    "V Prime validation failed.(%d)\n", ret);
 786		goto err;
 787	}
 788
 789	drm_dbg_kms(display->drm, "HDCP is enabled (%d downstream devices)\n",
 790		    num_downstream);
 791	ret = 0;
 792err:
 793	kfree(ksv_fifo);
 794	return ret;
 795}
 796
/*
 * Implements Part 1 of the HDCP authorization procedure: generate and
 * exchange An/Aksv, read and revocation-check Bksv, kick off authentication
 * and encryption in HW, verify R0/R0' match, and finally enable per-stream
 * encryption where the shim supports it. Chains into Part 2
 * (intel_hdcp_auth_downstream()) when the sink reports a repeater.
 */
static int intel_hdcp_auth(struct intel_connector *connector)
{
	struct intel_display *display = to_intel_display(connector);
	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
	struct intel_hdcp *hdcp = &connector->hdcp;
	const struct intel_hdcp_shim *shim = hdcp->shim;
	enum transcoder cpu_transcoder = connector->hdcp.cpu_transcoder;
	enum port port = dig_port->base.port;
	unsigned long r0_prime_gen_start;
	int ret, i, tries = 2;
	/*
	 * Unions let the same bytes be viewed as u32 register values for HW
	 * and as byte arrays for the shim/DPCD side.
	 */
	union {
		u32 reg[2];
		u8 shim[DRM_HDCP_AN_LEN];
	} an;
	union {
		u32 reg[2];
		u8 shim[DRM_HDCP_KSV_LEN];
	} bksv;
	union {
		u32 reg;
		u8 shim[DRM_HDCP_RI_LEN];
	} ri;
	bool repeater_present, hdcp_capable;

	/*
	 * Detects whether the display is HDCP capable. Although we check for
	 * valid Bksv below, the HDCP over DP spec requires that we check
	 * whether the display supports HDCP before we write An. For HDMI
	 * displays, this is not necessary.
	 */
	if (shim->hdcp_get_capability) {
		ret = shim->hdcp_get_capability(dig_port, &hdcp_capable);
		if (ret)
			return ret;
		if (!hdcp_capable) {
			drm_dbg_kms(display->drm,
				    "Panel is not HDCP capable\n");
			return -EINVAL;
		}
	}

	/* Initialize An with 2 random values and acquire it */
	for (i = 0; i < 2; i++)
		intel_de_write(display,
			       HDCP_ANINIT(display, cpu_transcoder, port),
			       get_random_u32());
	intel_de_write(display, HDCP_CONF(display, cpu_transcoder, port),
		       HDCP_CONF_CAPTURE_AN);

	/* Wait for An to be acquired */
	if (intel_de_wait_for_set(display,
				  HDCP_STATUS(display, cpu_transcoder, port),
				  HDCP_STATUS_AN_READY, 1)) {
		drm_err(display->drm, "Timed out waiting for An\n");
		return -ETIMEDOUT;
	}

	/* Read back the HW-generated An and hand it to the sink via the shim. */
	an.reg[0] = intel_de_read(display,
				  HDCP_ANLO(display, cpu_transcoder, port));
	an.reg[1] = intel_de_read(display,
				  HDCP_ANHI(display, cpu_transcoder, port));
	ret = shim->write_an_aksv(dig_port, an.shim);
	if (ret)
		return ret;

	/* R0' generation timing starts from the Aksv write (see 300ms wait). */
	r0_prime_gen_start = jiffies;

	memset(&bksv, 0, sizeof(bksv));

	ret = intel_hdcp_read_valid_bksv(dig_port, shim, bksv.shim);
	if (ret < 0)
		return ret;

	/* A revoked Bksv (SRM list) fails authentication outright. */
	if (drm_hdcp_check_ksvs_revoked(display->drm, bksv.shim, 1) > 0) {
		drm_err(display->drm, "BKSV is revoked\n");
		return -EPERM;
	}

	intel_de_write(display, HDCP_BKSVLO(display, cpu_transcoder, port),
		       bksv.reg[0]);
	intel_de_write(display, HDCP_BKSVHI(display, cpu_transcoder, port),
		       bksv.reg[1]);

	ret = shim->repeater_present(dig_port, &repeater_present);
	if (ret)
		return ret;
	if (repeater_present)
		intel_de_write(display, HDCP_REP_CTL,
			       intel_hdcp_get_repeater_ctl(display, cpu_transcoder, port));

	ret = shim->toggle_signalling(dig_port, cpu_transcoder, true);
	if (ret)
		return ret;

	/* Kick off HW authentication + encryption. */
	intel_de_write(display, HDCP_CONF(display, cpu_transcoder, port),
		       HDCP_CONF_AUTH_AND_ENC);

	/* Wait for R0 ready */
	if (wait_for(intel_de_read(display, HDCP_STATUS(display, cpu_transcoder, port)) &
		     (HDCP_STATUS_R0_READY | HDCP_STATUS_ENC), 1)) {
		drm_err(display->drm, "Timed out waiting for R0 ready\n");
		return -ETIMEDOUT;
	}

	/*
	 * Wait for R0' to become available. The spec says 100ms from Aksv, but
	 * some monitors can take longer than this. We'll set the timeout at
	 * 300ms just to be sure.
	 *
	 * On DP, there's an R0_READY bit available but no such bit
	 * exists on HDMI. Since the upper-bound is the same, we'll just do
	 * the stupid thing instead of polling on one and not the other.
	 */
	wait_remaining_ms_from_jiffies(r0_prime_gen_start, 300);

	tries = 3;

	/*
	 * DP HDCP Spec mandates the two more reattempt to read R0, incase
	 * of R0 mismatch.
	 */
	for (i = 0; i < tries; i++) {
		ri.reg = 0;
		ret = shim->read_ri_prime(dig_port, ri.shim);
		if (ret)
			return ret;
		intel_de_write(display,
			       HDCP_RPRIME(display, cpu_transcoder, port),
			       ri.reg);

		/* Wait for Ri prime match */
		if (!wait_for(intel_de_read(display, HDCP_STATUS(display, cpu_transcoder, port)) &
			      (HDCP_STATUS_RI_MATCH | HDCP_STATUS_ENC), 1))
			break;
	}

	if (i == tries) {
		drm_dbg_kms(display->drm,
			    "Timed out waiting for Ri prime match (%x)\n",
			    intel_de_read(display,
					  HDCP_STATUS(display, cpu_transcoder, port)));
		return -ETIMEDOUT;
	}

	/* Wait for encryption confirmation */
	if (intel_de_wait_for_set(display,
				  HDCP_STATUS(display, cpu_transcoder, port),
				  HDCP_STATUS_ENC,
				  HDCP_ENCRYPT_STATUS_CHANGE_TIMEOUT_MS)) {
		drm_err(display->drm, "Timed out waiting for encryption\n");
		return -ETIMEDOUT;
	}

	/* DP MST Auth Part 1 Step 2.a and Step 2.b */
	if (shim->stream_encryption) {
		ret = shim->stream_encryption(connector, true);
		if (ret) {
			drm_err(display->drm, "[CONNECTOR:%d:%s] Failed to enable HDCP 1.4 stream enc\n",
				connector->base.base.id, connector->base.name);
			return ret;
		}
		drm_dbg_kms(display->drm, "HDCP 1.4 transcoder: %s stream encrypted\n",
			    transcoder_name(hdcp->stream_transcoder));
	}

	if (repeater_present)
		return intel_hdcp_auth_downstream(connector);

	drm_dbg_kms(display->drm, "HDCP is enabled (no repeater present)\n");
	return 0;
}
 969
/*
 * Tear down HDCP 1.4 on this connector: disable per-stream encryption (if
 * the shim supports it), then — unless other MST streams on the port still
 * use HDCP — disable port encryption, clear the repeater bits and drop HDCP
 * signalling. Returns 0 on success or a negative error code.
 */
static int _intel_hdcp_disable(struct intel_connector *connector)
{
	struct intel_display *display = to_intel_display(connector);
	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
	struct intel_hdcp *hdcp = &connector->hdcp;
	enum port port = dig_port->base.port;
	enum transcoder cpu_transcoder = hdcp->cpu_transcoder;
	u32 repeater_ctl;
	int ret;

	drm_dbg_kms(display->drm, "[CONNECTOR:%d:%s] HDCP is being disabled...\n",
		    connector->base.base.id, connector->base.name);

	if (hdcp->shim->stream_encryption) {
		ret = hdcp->shim->stream_encryption(connector, false);
		if (ret) {
			drm_err(display->drm, "[CONNECTOR:%d:%s] Failed to disable HDCP 1.4 stream enc\n",
				connector->base.base.id, connector->base.name);
			return ret;
		}
		drm_dbg_kms(display->drm, "HDCP 1.4 transcoder: %s stream encryption disabled\n",
			    transcoder_name(hdcp->stream_transcoder));
		/*
		 * If there are other connectors on this port using HDCP,
		 * don't disable it until it disabled HDCP encryption for
		 * all connectors in MST topology.
		 */
		if (dig_port->num_hdcp_streams > 0)
			return 0;
	}

	hdcp->hdcp_encrypted = false;
	intel_de_write(display, HDCP_CONF(display, cpu_transcoder, port), 0);
	/* ~0 mask: wait for the whole HDCP status register to read zero. */
	if (intel_de_wait_for_clear(display,
				    HDCP_STATUS(display, cpu_transcoder, port),
				    ~0, HDCP_ENCRYPT_STATUS_CHANGE_TIMEOUT_MS)) {
		drm_err(display->drm,
			"Failed to disable HDCP, timeout clearing status\n");
		return -ETIMEDOUT;
	}

	/* Clear this port's/transcoder's repeater-present + M0 select bits. */
	repeater_ctl = intel_hdcp_get_repeater_ctl(display, cpu_transcoder,
						   port);
	intel_de_rmw(display, HDCP_REP_CTL, repeater_ctl, 0);

	ret = hdcp->shim->toggle_signalling(dig_port, cpu_transcoder, false);
	if (ret) {
		drm_err(display->drm, "Failed to disable HDCP signalling\n");
		return ret;
	}

	drm_dbg_kms(display->drm, "HDCP is disabled\n");
	return 0;
}
1024
/*
 * Enable HDCP 1.4 on a connector: load the HDCP keys into hardware,
 * then run authentication, retrying the whole auth sequence a few
 * times since the HDCP spec expects reauthentication on failure.
 *
 * Returns 0 on success (hdcp_encrypted is set), negative error code
 * otherwise.
 */
static int intel_hdcp1_enable(struct intel_connector *connector)
{
	struct intel_display *display = to_intel_display(connector);
	struct intel_hdcp *hdcp = &connector->hdcp;
	int i, ret, tries = 3;

	drm_dbg_kms(display->drm, "[CONNECTOR:%d:%s] HDCP is being enabled...\n",
		    connector->base.base.id, connector->base.name);

	if (!hdcp_key_loadable(display)) {
		drm_err(display->drm, "HDCP key Load is not possible\n");
		return -ENXIO;
	}

	/* Key load can fail transiently: clear the keys and retry. */
	for (i = 0; i < KEY_LOAD_TRIES; i++) {
		ret = intel_hdcp_load_keys(display);
		if (!ret)
			break;
		intel_hdcp_clear_keys(display);
	}
	if (ret) {
		drm_err(display->drm, "Could not load HDCP keys, (%d)\n",
			ret);
		return ret;
	}

	/* Incase of authentication failures, HDCP spec expects reauth. */
	for (i = 0; i < tries; i++) {
		ret = intel_hdcp_auth(connector);
		if (!ret) {
			hdcp->hdcp_encrypted = true;
			return 0;
		}

		drm_dbg_kms(display->drm, "HDCP Auth failure (%d)\n", ret);

		/* Ensuring HDCP encryption and signalling are stopped. */
		_intel_hdcp_disable(connector);
	}

	drm_dbg_kms(display->drm,
		    "HDCP authentication failed (%d tries/%d)\n", tries, ret);
	return ret;
}
1069
1070static struct intel_connector *intel_hdcp_to_connector(struct intel_hdcp *hdcp)
1071{
1072	return container_of(hdcp, struct intel_connector, hdcp);
1073}
1074
/*
 * Update the cached content-protection value for this connector and
 * keep the per-port count of HDCP-enabled streams in sync. When
 * @update_property is set, schedule prop_work to propagate the new
 * value to the connector's content protection property.
 *
 * Caller must hold hdcp->mutex; dig_port->hdcp_mutex is additionally
 * required whenever the value actually changes.
 */
static void intel_hdcp_update_value(struct intel_connector *connector,
				    u64 value, bool update_property)
{
	struct intel_display *display = to_intel_display(connector);
	struct drm_i915_private *i915 = to_i915(display->drm);
	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
	struct intel_hdcp *hdcp = &connector->hdcp;

	drm_WARN_ON(display->drm, !mutex_is_locked(&hdcp->mutex));

	if (hdcp->value == value)
		return;

	drm_WARN_ON(display->drm, !mutex_is_locked(&dig_port->hdcp_mutex));

	/* Count streams transitioning into/out of the ENABLED state. */
	if (hdcp->value == DRM_MODE_CONTENT_PROTECTION_ENABLED) {
		if (!drm_WARN_ON(display->drm, dig_port->num_hdcp_streams == 0))
			dig_port->num_hdcp_streams--;
	} else if (value == DRM_MODE_CONTENT_PROTECTION_ENABLED) {
		dig_port->num_hdcp_streams++;
	}

	hdcp->value = value;
	if (update_property) {
		/* prop_work drops this reference when it runs. */
		drm_connector_get(&connector->base);
		if (!queue_work(i915->unordered_wq, &hdcp->prop_work))
			drm_connector_put(&connector->base);
	}
}
1104
/*
 * Implements Part 3 of the HDCP authorization procedure: verify the
 * link is still encrypted and the sink still passes the shim's link
 * check; on failure, disable and re-run the full HDCP 1.4 enable
 * sequence, demoting the property to DESIRED if recovery fails.
 */
static int intel_hdcp_check_link(struct intel_connector *connector)
{
	struct intel_display *display = to_intel_display(connector);
	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
	struct intel_hdcp *hdcp = &connector->hdcp;
	enum port port = dig_port->base.port;
	enum transcoder cpu_transcoder;
	int ret = 0;

	mutex_lock(&hdcp->mutex);
	mutex_lock(&dig_port->hdcp_mutex);

	cpu_transcoder = hdcp->cpu_transcoder;

	/* Check_link valid only when HDCP1.4 is enabled */
	if (hdcp->value != DRM_MODE_CONTENT_PROTECTION_ENABLED ||
	    !hdcp->hdcp_encrypted) {
		ret = -EINVAL;
		goto out;
	}

	/* Hardware dropped encryption behind our back: report and demote. */
	if (drm_WARN_ON(display->drm,
			!intel_hdcp_in_use(display, cpu_transcoder, port))) {
		drm_err(display->drm,
			"[CONNECTOR:%d:%s] HDCP link stopped encryption,%x\n",
			connector->base.base.id, connector->base.name,
			intel_de_read(display, HDCP_STATUS(display, cpu_transcoder, port)));
		ret = -ENXIO;
		intel_hdcp_update_value(connector,
					DRM_MODE_CONTENT_PROTECTION_DESIRED,
					true);
		goto out;
	}

	/* Shim says the link is fine: make sure the property says ENABLED. */
	if (hdcp->shim->check_link(dig_port, connector)) {
		if (hdcp->value != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) {
			intel_hdcp_update_value(connector,
				DRM_MODE_CONTENT_PROTECTION_ENABLED, true);
		}
		goto out;
	}

	drm_dbg_kms(display->drm,
		    "[CONNECTOR:%d:%s] HDCP link failed, retrying authentication\n",
		    connector->base.base.id, connector->base.name);

	/* Full disable + re-enable cycle to recover the link. */
	ret = _intel_hdcp_disable(connector);
	if (ret) {
		drm_err(display->drm, "Failed to disable hdcp (%d)\n", ret);
		intel_hdcp_update_value(connector,
					DRM_MODE_CONTENT_PROTECTION_DESIRED,
					true);
		goto out;
	}

	ret = intel_hdcp1_enable(connector);
	if (ret) {
		drm_err(display->drm, "Failed to enable hdcp (%d)\n", ret);
		intel_hdcp_update_value(connector,
					DRM_MODE_CONTENT_PROTECTION_DESIRED,
					true);
		goto out;
	}

out:
	mutex_unlock(&dig_port->hdcp_mutex);
	mutex_unlock(&hdcp->mutex);
	return ret;
}
1175
/*
 * Deferred worker that pushes the cached HDCP value into the
 * connector's content protection property. Done from a worker because
 * the property update needs connection_mutex, which is taken here
 * before hdcp->mutex. Drops the connector reference taken when the
 * work was queued.
 */
static void intel_hdcp_prop_work(struct work_struct *work)
{
	struct intel_hdcp *hdcp = container_of(work, struct intel_hdcp,
					       prop_work);
	struct intel_connector *connector = intel_hdcp_to_connector(hdcp);
	struct intel_display *display = to_intel_display(connector);

	drm_modeset_lock(&display->drm->mode_config.connection_mutex, NULL);
	mutex_lock(&hdcp->mutex);

	/*
	 * This worker is only used to flip between ENABLED/DESIRED. Either of
	 * those to UNDESIRED is handled by core. If value == UNDESIRED,
	 * we're running just after hdcp has been disabled, so just exit
	 */
	if (hdcp->value != DRM_MODE_CONTENT_PROTECTION_UNDESIRED)
		drm_hdcp_update_content_protection(&connector->base,
						   hdcp->value);

	mutex_unlock(&hdcp->mutex);
	drm_modeset_unlock(&display->drm->mode_config.connection_mutex);

	drm_connector_put(&connector->base);
}
1200
1201bool is_hdcp_supported(struct intel_display *display, enum port port)
1202{
1203	return DISPLAY_RUNTIME_INFO(display)->has_hdcp &&
1204		(DISPLAY_VER(display) >= 12 || port < PORT_E);
1205}
1206
1207static int
1208hdcp2_prepare_ake_init(struct intel_connector *connector,
1209		       struct hdcp2_ake_init *ake_data)
1210{
1211	struct intel_display *display = to_intel_display(connector);
1212	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
1213	struct hdcp_port_data *data = &dig_port->hdcp_port_data;
1214	struct i915_hdcp_arbiter *arbiter;
1215	int ret;
1216
1217	mutex_lock(&display->hdcp.hdcp_mutex);
1218	arbiter = display->hdcp.arbiter;
1219
1220	if (!arbiter || !arbiter->ops) {
1221		mutex_unlock(&display->hdcp.hdcp_mutex);
1222		return -EINVAL;
1223	}
1224
1225	ret = arbiter->ops->initiate_hdcp2_session(arbiter->hdcp_dev, data, ake_data);
1226	if (ret)
1227		drm_dbg_kms(display->drm, "Prepare_ake_init failed. %d\n",
1228			    ret);
1229	mutex_unlock(&display->hdcp.hdcp_mutex);
1230
1231	return ret;
1232}
1233
1234static int
1235hdcp2_verify_rx_cert_prepare_km(struct intel_connector *connector,
1236				struct hdcp2_ake_send_cert *rx_cert,
1237				bool *paired,
1238				struct hdcp2_ake_no_stored_km *ek_pub_km,
1239				size_t *msg_sz)
1240{
1241	struct intel_display *display = to_intel_display(connector);
1242	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
1243	struct hdcp_port_data *data = &dig_port->hdcp_port_data;
1244	struct i915_hdcp_arbiter *arbiter;
1245	int ret;
1246
1247	mutex_lock(&display->hdcp.hdcp_mutex);
1248	arbiter = display->hdcp.arbiter;
1249
1250	if (!arbiter || !arbiter->ops) {
1251		mutex_unlock(&display->hdcp.hdcp_mutex);
1252		return -EINVAL;
1253	}
1254
1255	ret = arbiter->ops->verify_receiver_cert_prepare_km(arbiter->hdcp_dev, data,
1256							 rx_cert, paired,
1257							 ek_pub_km, msg_sz);
1258	if (ret < 0)
1259		drm_dbg_kms(display->drm, "Verify rx_cert failed. %d\n",
1260			    ret);
1261	mutex_unlock(&display->hdcp.hdcp_mutex);
1262
1263	return ret;
1264}
1265
1266static int hdcp2_verify_hprime(struct intel_connector *connector,
1267			       struct hdcp2_ake_send_hprime *rx_hprime)
1268{
1269	struct intel_display *display = to_intel_display(connector);
1270	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
1271	struct hdcp_port_data *data = &dig_port->hdcp_port_data;
1272	struct i915_hdcp_arbiter *arbiter;
1273	int ret;
1274
1275	mutex_lock(&display->hdcp.hdcp_mutex);
1276	arbiter = display->hdcp.arbiter;
1277
1278	if (!arbiter || !arbiter->ops) {
1279		mutex_unlock(&display->hdcp.hdcp_mutex);
1280		return -EINVAL;
1281	}
1282
1283	ret = arbiter->ops->verify_hprime(arbiter->hdcp_dev, data, rx_hprime);
1284	if (ret < 0)
1285		drm_dbg_kms(display->drm, "Verify hprime failed. %d\n", ret);
1286	mutex_unlock(&display->hdcp.hdcp_mutex);
1287
1288	return ret;
1289}
1290
1291static int
1292hdcp2_store_pairing_info(struct intel_connector *connector,
1293			 struct hdcp2_ake_send_pairing_info *pairing_info)
1294{
1295	struct intel_display *display = to_intel_display(connector);
1296	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
1297	struct hdcp_port_data *data = &dig_port->hdcp_port_data;
1298	struct i915_hdcp_arbiter *arbiter;
1299	int ret;
1300
1301	mutex_lock(&display->hdcp.hdcp_mutex);
1302	arbiter = display->hdcp.arbiter;
1303
1304	if (!arbiter || !arbiter->ops) {
1305		mutex_unlock(&display->hdcp.hdcp_mutex);
1306		return -EINVAL;
1307	}
1308
1309	ret = arbiter->ops->store_pairing_info(arbiter->hdcp_dev, data, pairing_info);
1310	if (ret < 0)
1311		drm_dbg_kms(display->drm, "Store pairing info failed. %d\n",
1312			    ret);
1313	mutex_unlock(&display->hdcp.hdcp_mutex);
1314
1315	return ret;
1316}
1317
1318static int
1319hdcp2_prepare_lc_init(struct intel_connector *connector,
1320		      struct hdcp2_lc_init *lc_init)
1321{
1322	struct intel_display *display = to_intel_display(connector);
1323	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
1324	struct hdcp_port_data *data = &dig_port->hdcp_port_data;
1325	struct i915_hdcp_arbiter *arbiter;
1326	int ret;
1327
1328	mutex_lock(&display->hdcp.hdcp_mutex);
1329	arbiter = display->hdcp.arbiter;
1330
1331	if (!arbiter || !arbiter->ops) {
1332		mutex_unlock(&display->hdcp.hdcp_mutex);
1333		return -EINVAL;
1334	}
1335
1336	ret = arbiter->ops->initiate_locality_check(arbiter->hdcp_dev, data, lc_init);
1337	if (ret < 0)
1338		drm_dbg_kms(display->drm, "Prepare lc_init failed. %d\n",
1339			    ret);
1340	mutex_unlock(&display->hdcp.hdcp_mutex);
1341
1342	return ret;
1343}
1344
1345static int
1346hdcp2_verify_lprime(struct intel_connector *connector,
1347		    struct hdcp2_lc_send_lprime *rx_lprime)
1348{
1349	struct intel_display *display = to_intel_display(connector);
1350	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
1351	struct hdcp_port_data *data = &dig_port->hdcp_port_data;
1352	struct i915_hdcp_arbiter *arbiter;
1353	int ret;
1354
1355	mutex_lock(&display->hdcp.hdcp_mutex);
1356	arbiter = display->hdcp.arbiter;
1357
1358	if (!arbiter || !arbiter->ops) {
1359		mutex_unlock(&display->hdcp.hdcp_mutex);
1360		return -EINVAL;
1361	}
1362
1363	ret = arbiter->ops->verify_lprime(arbiter->hdcp_dev, data, rx_lprime);
1364	if (ret < 0)
1365		drm_dbg_kms(display->drm, "Verify L_Prime failed. %d\n",
1366			    ret);
1367	mutex_unlock(&display->hdcp.hdcp_mutex);
1368
1369	return ret;
1370}
1371
1372static int hdcp2_prepare_skey(struct intel_connector *connector,
1373			      struct hdcp2_ske_send_eks *ske_data)
1374{
1375	struct intel_display *display = to_intel_display(connector);
1376	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
1377	struct hdcp_port_data *data = &dig_port->hdcp_port_data;
1378	struct i915_hdcp_arbiter *arbiter;
1379	int ret;
1380
1381	mutex_lock(&display->hdcp.hdcp_mutex);
1382	arbiter = display->hdcp.arbiter;
1383
1384	if (!arbiter || !arbiter->ops) {
1385		mutex_unlock(&display->hdcp.hdcp_mutex);
1386		return -EINVAL;
1387	}
1388
1389	ret = arbiter->ops->get_session_key(arbiter->hdcp_dev, data, ske_data);
1390	if (ret < 0)
1391		drm_dbg_kms(display->drm, "Get session key failed. %d\n",
1392			    ret);
1393	mutex_unlock(&display->hdcp.hdcp_mutex);
1394
1395	return ret;
1396}
1397
1398static int
1399hdcp2_verify_rep_topology_prepare_ack(struct intel_connector *connector,
1400				      struct hdcp2_rep_send_receiverid_list
1401								*rep_topology,
1402				      struct hdcp2_rep_send_ack *rep_send_ack)
1403{
1404	struct intel_display *display = to_intel_display(connector);
1405	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
1406	struct hdcp_port_data *data = &dig_port->hdcp_port_data;
1407	struct i915_hdcp_arbiter *arbiter;
1408	int ret;
1409
1410	mutex_lock(&display->hdcp.hdcp_mutex);
1411	arbiter = display->hdcp.arbiter;
1412
1413	if (!arbiter || !arbiter->ops) {
1414		mutex_unlock(&display->hdcp.hdcp_mutex);
1415		return -EINVAL;
1416	}
1417
1418	ret = arbiter->ops->repeater_check_flow_prepare_ack(arbiter->hdcp_dev,
1419							    data,
1420							    rep_topology,
1421							    rep_send_ack);
1422	if (ret < 0)
1423		drm_dbg_kms(display->drm,
1424			    "Verify rep topology failed. %d\n", ret);
1425	mutex_unlock(&display->hdcp.hdcp_mutex);
1426
1427	return ret;
1428}
1429
1430static int
1431hdcp2_verify_mprime(struct intel_connector *connector,
1432		    struct hdcp2_rep_stream_ready *stream_ready)
1433{
1434	struct intel_display *display = to_intel_display(connector);
1435	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
1436	struct hdcp_port_data *data = &dig_port->hdcp_port_data;
1437	struct i915_hdcp_arbiter *arbiter;
1438	int ret;
1439
1440	mutex_lock(&display->hdcp.hdcp_mutex);
1441	arbiter = display->hdcp.arbiter;
1442
1443	if (!arbiter || !arbiter->ops) {
1444		mutex_unlock(&display->hdcp.hdcp_mutex);
1445		return -EINVAL;
1446	}
1447
1448	ret = arbiter->ops->verify_mprime(arbiter->hdcp_dev, data, stream_ready);
1449	if (ret < 0)
1450		drm_dbg_kms(display->drm, "Verify mprime failed. %d\n", ret);
1451	mutex_unlock(&display->hdcp.hdcp_mutex);
1452
1453	return ret;
1454}
1455
1456static int hdcp2_authenticate_port(struct intel_connector *connector)
1457{
1458	struct intel_display *display = to_intel_display(connector);
1459	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
1460	struct hdcp_port_data *data = &dig_port->hdcp_port_data;
1461	struct i915_hdcp_arbiter *arbiter;
1462	int ret;
1463
1464	mutex_lock(&display->hdcp.hdcp_mutex);
1465	arbiter = display->hdcp.arbiter;
1466
1467	if (!arbiter || !arbiter->ops) {
1468		mutex_unlock(&display->hdcp.hdcp_mutex);
1469		return -EINVAL;
1470	}
1471
1472	ret = arbiter->ops->enable_hdcp_authentication(arbiter->hdcp_dev, data);
1473	if (ret < 0)
1474		drm_dbg_kms(display->drm, "Enable hdcp auth failed. %d\n",
1475			    ret);
1476	mutex_unlock(&display->hdcp.hdcp_mutex);
1477
1478	return ret;
1479}
1480
1481static int hdcp2_close_session(struct intel_connector *connector)
1482{
1483	struct intel_display *display = to_intel_display(connector);
1484	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
1485	struct i915_hdcp_arbiter *arbiter;
1486	int ret;
1487
1488	mutex_lock(&display->hdcp.hdcp_mutex);
1489	arbiter = display->hdcp.arbiter;
1490
1491	if (!arbiter || !arbiter->ops) {
1492		mutex_unlock(&display->hdcp.hdcp_mutex);
1493		return -EINVAL;
1494	}
1495
1496	ret = arbiter->ops->close_hdcp_session(arbiter->hdcp_dev,
1497					     &dig_port->hdcp_port_data);
1498	mutex_unlock(&display->hdcp.hdcp_mutex);
1499
1500	return ret;
1501}
1502
/* Deauthenticating the port is just closing the arbiter session. */
static int hdcp2_deauthenticate_port(struct intel_connector *connector)
{
	return hdcp2_close_session(connector);
}
1507
1508/* Authentication flow starts from here */
1509static int hdcp2_authentication_key_exchange(struct intel_connector *connector)
1510{
1511	struct intel_display *display = to_intel_display(connector);
1512	struct intel_digital_port *dig_port =
1513		intel_attached_dig_port(connector);
1514	struct intel_hdcp *hdcp = &connector->hdcp;
1515	union {
1516		struct hdcp2_ake_init ake_init;
1517		struct hdcp2_ake_send_cert send_cert;
1518		struct hdcp2_ake_no_stored_km no_stored_km;
1519		struct hdcp2_ake_send_hprime send_hprime;
1520		struct hdcp2_ake_send_pairing_info pairing_info;
1521	} msgs;
1522	const struct intel_hdcp_shim *shim = hdcp->shim;
1523	size_t size;
1524	int ret, i, max_retries;
1525
1526	/* Init for seq_num */
1527	hdcp->seq_num_v = 0;
1528	hdcp->seq_num_m = 0;
1529
1530	if (intel_encoder_is_dp(&dig_port->base) ||
1531	    intel_encoder_is_mst(&dig_port->base))
1532		max_retries = 10;
1533	else
1534		max_retries = 1;
1535
1536	ret = hdcp2_prepare_ake_init(connector, &msgs.ake_init);
1537	if (ret < 0)
1538		return ret;
1539
1540	/*
1541	 * Retry the first read and write to downstream at least 10 times
1542	 * with a 50ms delay if not hdcp2 capable for DP/DPMST encoders
1543	 * (dock decides to stop advertising hdcp2 capability for some reason).
1544	 * The reason being that during suspend resume dock usually keeps the
1545	 * HDCP2 registers inaccesible causing AUX error. This wouldn't be a
1546	 * big problem if the userspace just kept retrying with some delay while
1547	 * it continues to play low value content but most userpace applications
1548	 * end up throwing an error when it receives one from KMD. This makes
1549	 * sure we give the dock and the sink devices to complete its power cycle
1550	 * and then try HDCP authentication. The values of 10 and delay of 50ms
1551	 * was decided based on multiple trial and errors.
1552	 */
1553	for (i = 0; i < max_retries; i++) {
1554		if (!intel_hdcp2_get_capability(connector)) {
1555			msleep(50);
1556			continue;
1557		}
1558
1559		ret = shim->write_2_2_msg(connector, &msgs.ake_init,
1560					  sizeof(msgs.ake_init));
1561		if (ret < 0)
1562			continue;
1563
1564		ret = shim->read_2_2_msg(connector, HDCP_2_2_AKE_SEND_CERT,
1565					 &msgs.send_cert, sizeof(msgs.send_cert));
1566		if (ret > 0)
1567			break;
1568	}
1569
 
 
1570	if (ret < 0)
1571		return ret;
1572
1573	if (msgs.send_cert.rx_caps[0] != HDCP_2_2_RX_CAPS_VERSION_VAL) {
1574		drm_dbg_kms(display->drm, "cert.rx_caps dont claim HDCP2.2\n");
1575		return -EINVAL;
1576	}
1577
1578	hdcp->is_repeater = HDCP_2_2_RX_REPEATER(msgs.send_cert.rx_caps[2]);
1579
1580	if (drm_hdcp_check_ksvs_revoked(display->drm,
1581					msgs.send_cert.cert_rx.receiver_id,
1582					1) > 0) {
1583		drm_err(display->drm, "Receiver ID is revoked\n");
1584		return -EPERM;
1585	}
1586
1587	/*
1588	 * Here msgs.no_stored_km will hold msgs corresponding to the km
1589	 * stored also.
1590	 */
1591	ret = hdcp2_verify_rx_cert_prepare_km(connector, &msgs.send_cert,
1592					      &hdcp->is_paired,
1593					      &msgs.no_stored_km, &size);
1594	if (ret < 0)
1595		return ret;
1596
1597	ret = shim->write_2_2_msg(connector, &msgs.no_stored_km, size);
1598	if (ret < 0)
1599		return ret;
1600
1601	ret = shim->read_2_2_msg(connector, HDCP_2_2_AKE_SEND_HPRIME,
1602				 &msgs.send_hprime, sizeof(msgs.send_hprime));
1603	if (ret < 0)
1604		return ret;
1605
1606	ret = hdcp2_verify_hprime(connector, &msgs.send_hprime);
1607	if (ret < 0)
1608		return ret;
1609
1610	if (!hdcp->is_paired) {
1611		/* Pairing is required */
1612		ret = shim->read_2_2_msg(connector,
1613					 HDCP_2_2_AKE_SEND_PAIRING_INFO,
1614					 &msgs.pairing_info,
1615					 sizeof(msgs.pairing_info));
1616		if (ret < 0)
1617			return ret;
1618
1619		ret = hdcp2_store_pairing_info(connector, &msgs.pairing_info);
1620		if (ret < 0)
1621			return ret;
1622		hdcp->is_paired = true;
1623	}
1624
1625	return 0;
1626}
1627
1628static int hdcp2_locality_check(struct intel_connector *connector)
1629{
 
1630	struct intel_hdcp *hdcp = &connector->hdcp;
1631	union {
1632		struct hdcp2_lc_init lc_init;
1633		struct hdcp2_lc_send_lprime send_lprime;
1634	} msgs;
1635	const struct intel_hdcp_shim *shim = hdcp->shim;
1636	int tries = HDCP2_LC_RETRY_CNT, ret, i;
1637
1638	for (i = 0; i < tries; i++) {
1639		ret = hdcp2_prepare_lc_init(connector, &msgs.lc_init);
1640		if (ret < 0)
1641			continue;
1642
1643		ret = shim->write_2_2_msg(connector, &msgs.lc_init,
1644				      sizeof(msgs.lc_init));
1645		if (ret < 0)
1646			continue;
1647
1648		ret = shim->read_2_2_msg(connector,
1649					 HDCP_2_2_LC_SEND_LPRIME,
1650					 &msgs.send_lprime,
1651					 sizeof(msgs.send_lprime));
1652		if (ret < 0)
1653			continue;
1654
1655		ret = hdcp2_verify_lprime(connector, &msgs.send_lprime);
1656		if (!ret)
1657			break;
1658	}
1659
1660	return ret;
1661}
1662
1663static int hdcp2_session_key_exchange(struct intel_connector *connector)
1664{
 
1665	struct intel_hdcp *hdcp = &connector->hdcp;
1666	struct hdcp2_ske_send_eks send_eks;
1667	int ret;
1668
1669	ret = hdcp2_prepare_skey(connector, &send_eks);
1670	if (ret < 0)
1671		return ret;
1672
1673	ret = hdcp->shim->write_2_2_msg(connector, &send_eks,
1674					sizeof(send_eks));
1675	if (ret < 0)
1676		return ret;
1677
1678	return 0;
1679}
1680
/*
 * One RepeaterAuth_Stream_Manage / Stream_Ready round-trip with the
 * repeater, followed by M' verification.
 *
 * Note: seq_num_m is incremented on every attempt, success or failure,
 * as the counter must advance per transmitted Stream_Manage message.
 */
static
int _hdcp2_propagate_stream_management_info(struct intel_connector *connector)
{
	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
	struct hdcp_port_data *data = &dig_port->hdcp_port_data;
	struct intel_hdcp *hdcp = &connector->hdcp;
	union {
		struct hdcp2_rep_stream_manage stream_manage;
		struct hdcp2_rep_stream_ready stream_ready;
	} msgs;
	const struct intel_hdcp_shim *shim = hdcp->shim;
	int ret, streams_size_delta, i;

	/* Counter exhausted: caller must re-authenticate from scratch. */
	if (connector->hdcp.seq_num_m > HDCP_2_2_SEQ_NUM_MAX)
		return -ERANGE;

	/* Prepare RepeaterAuth_Stream_Manage msg */
	msgs.stream_manage.msg_id = HDCP_2_2_REP_STREAM_MANAGE;
	drm_hdcp_cpu_to_be24(msgs.stream_manage.seq_num_m, hdcp->seq_num_m);

	msgs.stream_manage.k = cpu_to_be16(data->k);

	for (i = 0; i < data->k; i++) {
		msgs.stream_manage.streams[i].stream_id = data->streams[i].stream_id;
		msgs.stream_manage.streams[i].stream_type = data->streams[i].stream_type;
	}

	/* Only transmit the populated portion of the streams array. */
	streams_size_delta = (HDCP_2_2_MAX_CONTENT_STREAMS_CNT - data->k) *
				sizeof(struct hdcp2_streamid_type);
	/* Send it to Repeater */
	ret = shim->write_2_2_msg(connector, &msgs.stream_manage,
				  sizeof(msgs.stream_manage) - streams_size_delta);
	if (ret < 0)
		goto out;

	ret = shim->read_2_2_msg(connector, HDCP_2_2_REP_STREAM_READY,
				 &msgs.stream_ready, sizeof(msgs.stream_ready));
	if (ret < 0)
		goto out;

	data->seq_num_m = hdcp->seq_num_m;

	ret = hdcp2_verify_mprime(connector, &msgs.stream_ready);

out:
	hdcp->seq_num_m++;

	return ret;
}
1730
/*
 * Repeater authentication: read the ReceiverID_List, sanity-check the
 * topology (size limits, type-1 capability, seq_num_v monotonicity,
 * revoked IDs), have the arbiter validate it and send the ack back.
 */
static
int hdcp2_authenticate_repeater_topology(struct intel_connector *connector)
{
	struct intel_display *display = to_intel_display(connector);
	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
	struct intel_hdcp *hdcp = &connector->hdcp;
	union {
		struct hdcp2_rep_send_receiverid_list recvid_list;
		struct hdcp2_rep_send_ack rep_ack;
	} msgs;
	const struct intel_hdcp_shim *shim = hdcp->shim;
	u32 seq_num_v, device_cnt;
	u8 *rx_info;
	int ret;

	ret = shim->read_2_2_msg(connector, HDCP_2_2_REP_SEND_RECVID_LIST,
				 &msgs.recvid_list, sizeof(msgs.recvid_list));
	if (ret < 0)
		return ret;

	rx_info = msgs.recvid_list.rx_info;

	if (HDCP_2_2_MAX_CASCADE_EXCEEDED(rx_info[1]) ||
	    HDCP_2_2_MAX_DEVS_EXCEEDED(rx_info[1])) {
		drm_dbg_kms(display->drm, "Topology Max Size Exceeded\n");
		return -EINVAL;
	}

	/*
	 * MST topology is not Type 1 capable if it contains a downstream
	 * device that is only HDCP 1.x or Legacy HDCP 2.0/2.1 compliant.
	 */
	dig_port->hdcp_mst_type1_capable =
		!HDCP_2_2_HDCP1_DEVICE_CONNECTED(rx_info[1]) &&
		!HDCP_2_2_HDCP_2_0_REP_CONNECTED(rx_info[1]);

	if (!dig_port->hdcp_mst_type1_capable && hdcp->content_type) {
		drm_dbg_kms(display->drm,
			    "HDCP1.x or 2.0 Legacy Device Downstream\n");
		return -EINVAL;
	}

	/* Converting and Storing the seq_num_v to local variable as DWORD */
	seq_num_v =
		drm_hdcp_be24_to_cpu((const u8 *)msgs.recvid_list.seq_num_v);

	if (!hdcp->hdcp2_encrypted && seq_num_v) {
		drm_dbg_kms(display->drm,
			    "Non zero Seq_num_v at first RecvId_List msg\n");
		return -EINVAL;
	}

	if (seq_num_v < hdcp->seq_num_v) {
		/* Roll over of the seq_num_v from repeater. Reauthenticate. */
		drm_dbg_kms(display->drm, "Seq_num_v roll over.\n");
		return -EINVAL;
	}

	/* Device count is split across the two rx_info bytes. */
	device_cnt = (HDCP_2_2_DEV_COUNT_HI(rx_info[0]) << 4 |
		      HDCP_2_2_DEV_COUNT_LO(rx_info[1]));
	if (drm_hdcp_check_ksvs_revoked(display->drm,
					msgs.recvid_list.receiver_ids,
					device_cnt) > 0) {
		drm_err(display->drm, "Revoked receiver ID(s) is in list\n");
		return -EPERM;
	}

	ret = hdcp2_verify_rep_topology_prepare_ack(connector,
						    &msgs.recvid_list,
						    &msgs.rep_ack);
	if (ret < 0)
		return ret;

	/* Accept the new seq_num_v only after successful verification. */
	hdcp->seq_num_v = seq_num_v;
	ret = shim->write_2_2_msg(connector, &msgs.rep_ack,
				  sizeof(msgs.rep_ack));
	if (ret < 0)
		return ret;

	return 0;
}
1812
 
 
 
 
 
 
 
 
 
 
 
/*
 * Run the HDCP 2.2 authentication protocol with the sink, in spec
 * order: AKE, locality check, SKE, optional stream-type configuration,
 * and, for repeaters, topology authentication.
 */
static int hdcp2_authenticate_sink(struct intel_connector *connector)
{
	struct intel_display *display = to_intel_display(connector);
	struct intel_hdcp *hdcp = &connector->hdcp;
	const struct intel_hdcp_shim *shim = hdcp->shim;
	int ret;

	ret = hdcp2_authentication_key_exchange(connector);
	if (ret < 0) {
		drm_dbg_kms(display->drm, "AKE Failed. Err : %d\n", ret);
		return ret;
	}

	ret = hdcp2_locality_check(connector);
	if (ret < 0) {
		drm_dbg_kms(display->drm,
			    "Locality Check failed. Err : %d\n", ret);
		return ret;
	}

	ret = hdcp2_session_key_exchange(connector);
	if (ret < 0) {
		drm_dbg_kms(display->drm, "SKE Failed. Err : %d\n", ret);
		return ret;
	}

	/* Optional shim hook to program the stream/content type. */
	if (shim->config_stream_type) {
		ret = shim->config_stream_type(connector,
					       hdcp->is_repeater,
					       hdcp->content_type);
		if (ret < 0)
			return ret;
	}

	if (hdcp->is_repeater) {
		ret = hdcp2_authenticate_repeater_topology(connector);
		if (ret < 0) {
			drm_dbg_kms(display->drm,
				    "Repeater Auth Failed. Err: %d\n", ret);
			return ret;
		}
	}

	return ret;
}
1858
/*
 * Enable per-stream HDCP 2.2 encryption (DP MST) once link-level
 * encryption is confirmed. If the link is not encrypted, tear down the
 * arbiter session and reset the port's auth state so a fresh
 * authentication can be attempted.
 */
static int hdcp2_enable_stream_encryption(struct intel_connector *connector)
{
	struct intel_display *display = to_intel_display(connector);
	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
	struct hdcp_port_data *data = &dig_port->hdcp_port_data;
	struct intel_hdcp *hdcp = &connector->hdcp;
	enum transcoder cpu_transcoder = hdcp->cpu_transcoder;
	enum port port = dig_port->base.port;
	int ret = 0;

	if (!(intel_de_read(display, HDCP2_STATUS(display, cpu_transcoder, port)) &
			    LINK_ENCRYPTION_STATUS)) {
		drm_err(display->drm, "[CONNECTOR:%d:%s] HDCP 2.2 Link is not encrypted\n",
			connector->base.base.id, connector->base.name);
		ret = -EPERM;
		goto link_recover;
	}

	if (hdcp->shim->stream_2_2_encryption) {
		ret = hdcp->shim->stream_2_2_encryption(connector, true);
		if (ret) {
			drm_err(display->drm, "[CONNECTOR:%d:%s] Failed to enable HDCP 2.2 stream enc\n",
				connector->base.base.id, connector->base.name);
			return ret;
		}
		drm_dbg_kms(display->drm, "HDCP 2.2 transcoder: %s stream encrypted\n",
			    transcoder_name(hdcp->stream_transcoder));
	}

	return 0;

link_recover:
	if (hdcp2_deauthenticate_port(connector) < 0)
		drm_dbg_kms(display->drm, "Port deauth failed.\n");

	/* Force a full re-auth and clear the prepared stream count. */
	dig_port->hdcp_auth_status = false;
	data->k = 0;

	return ret;
}
1899
/*
 * Turn on HDCP 2.2 link encryption: enable signalling via the shim if
 * needed, request encryption once the link reports authenticated, and
 * wait for the hardware to confirm encryption is active.
 */
static int hdcp2_enable_encryption(struct intel_connector *connector)
{
	struct intel_display *display = to_intel_display(connector);
	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
	struct intel_hdcp *hdcp = &connector->hdcp;
	enum port port = dig_port->base.port;
	enum transcoder cpu_transcoder = hdcp->cpu_transcoder;
	int ret;

	/* Encryption must not already be on when we get here. */
	drm_WARN_ON(display->drm,
		    intel_de_read(display, HDCP2_STATUS(display, cpu_transcoder, port)) &
		    LINK_ENCRYPTION_STATUS);
	if (hdcp->shim->toggle_signalling) {
		ret = hdcp->shim->toggle_signalling(dig_port, cpu_transcoder,
						    true);
		if (ret) {
			drm_err(display->drm,
				"Failed to enable HDCP signalling. %d\n",
				ret);
			return ret;
		}
	}

	if (intel_de_read(display, HDCP2_STATUS(display, cpu_transcoder, port)) &
	    LINK_AUTH_STATUS)
		/* Link is Authenticated. Now set for Encryption */
		intel_de_rmw(display, HDCP2_CTL(display, cpu_transcoder, port),
			     0, CTL_LINK_ENCRYPTION_REQ);

	ret = intel_de_wait_for_set(display,
				    HDCP2_STATUS(display, cpu_transcoder,
						 port),
				    LINK_ENCRYPTION_STATUS,
				    HDCP_ENCRYPT_STATUS_CHANGE_TIMEOUT_MS);
	dig_port->hdcp_auth_status = true;

	return ret;
}
1938
/*
 * Turn off HDCP 2.2 link encryption: drop the encryption request,
 * wait for the hardware to report encryption stopped (timeout is only
 * logged, not fatal), then disable signalling via the shim.
 */
static int hdcp2_disable_encryption(struct intel_connector *connector)
{
	struct intel_display *display = to_intel_display(connector);
	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
	struct intel_hdcp *hdcp = &connector->hdcp;
	enum port port = dig_port->base.port;
	enum transcoder cpu_transcoder = hdcp->cpu_transcoder;
	int ret;

	/* Encryption is expected to be on when we get here. */
	drm_WARN_ON(display->drm,
		    !(intel_de_read(display, HDCP2_STATUS(display, cpu_transcoder, port)) &
				    LINK_ENCRYPTION_STATUS));

	intel_de_rmw(display, HDCP2_CTL(display, cpu_transcoder, port),
		     CTL_LINK_ENCRYPTION_REQ, 0);

	ret = intel_de_wait_for_clear(display,
				      HDCP2_STATUS(display, cpu_transcoder,
						   port),
				      LINK_ENCRYPTION_STATUS,
				      HDCP_ENCRYPT_STATUS_CHANGE_TIMEOUT_MS);
	if (ret == -ETIMEDOUT)
		drm_dbg_kms(display->drm, "Disable Encryption Timedout");

	if (hdcp->shim->toggle_signalling) {
		ret = hdcp->shim->toggle_signalling(dig_port, cpu_transcoder,
						    false);
		if (ret) {
			drm_err(display->drm,
				"Failed to disable HDCP signalling. %d\n",
				ret);
			return ret;
		}
	}

	return ret;
}
1976
/*
 * Retry wrapper around _hdcp2_propagate_stream_management_info(): up to
 * three attempts, bailing out early on success or on seq_num_m rollover
 * (which instead requires a full re-authentication).
 * No-op (returns 0) for non-repeater sinks.
 */
static int
hdcp2_propagate_stream_management_info(struct intel_connector *connector)
{
	struct intel_display *display = to_intel_display(connector);
	int i, tries = 3, ret;

	if (!connector->hdcp.is_repeater)
		return 0;

	for (i = 0; i < tries; i++) {
		ret = _hdcp2_propagate_stream_management_info(connector);
		if (!ret)
			break;

		/* Lets restart the auth incase of seq_num_m roll over */
		if (connector->hdcp.seq_num_m > HDCP_2_2_SEQ_NUM_MAX) {
			drm_dbg_kms(display->drm,
				    "seq_num_m roll over.(%d)\n", ret);
			break;
		}

		drm_dbg_kms(display->drm,
			    "HDCP2 stream management %d of %d Failed.(%d)\n",
			    i + 1, tries, ret);
	}

	return ret;
}
2005
/*
 * Full HDCP2.2 bring-up: authenticate the sink (up to 3 tries), prepare and
 * propagate stream management info, authenticate the port, then enable link
 * and stream encryption.  A failed attempt tears down the firmware session
 * before retrying.
 */
static int hdcp2_authenticate_and_encrypt(struct intel_atomic_state *state,
					  struct intel_connector *connector)
{
	struct intel_display *display = to_intel_display(connector);
	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
	int ret = 0, i, tries = 3;

	/* Skip re-authentication if the port is already authenticated */
	for (i = 0; i < tries && !dig_port->hdcp_auth_status; i++) {
		ret = hdcp2_authenticate_sink(connector);
		if (!ret) {
			ret = intel_hdcp_prepare_streams(state, connector);
			if (ret) {
				drm_dbg_kms(display->drm,
					    "Prepare stream failed.(%d)\n",
					    ret);
				break;
			}

			ret = hdcp2_propagate_stream_management_info(connector);
			if (ret) {
				drm_dbg_kms(display->drm,
					    "Stream management failed.(%d)\n",
					    ret);
				break;
			}

			ret = hdcp2_authenticate_port(connector);
			if (!ret)
				break;
			drm_dbg_kms(display->drm, "HDCP2 port auth failed.(%d)\n",
				    ret);
		}

		/* Clearing the mei hdcp session */
		drm_dbg_kms(display->drm, "HDCP2.2 Auth %d of %d Failed.(%d)\n",
			    i + 1, tries, ret);
		if (hdcp2_deauthenticate_port(connector) < 0)
			drm_dbg_kms(display->drm, "Port deauth failed.\n");
	}

	if (!ret && !dig_port->hdcp_auth_status) {
		/*
		 * Ensuring the required 200mSec min time interval between
		 * Session Key Exchange and encryption.
		 */
		msleep(HDCP_2_2_DELAY_BEFORE_ENCRYPTION_EN);
		ret = hdcp2_enable_encryption(connector);
		if (ret < 0) {
			drm_dbg_kms(display->drm,
				    "Encryption Enable Failed.(%d)\n", ret);
			if (hdcp2_deauthenticate_port(connector) < 0)
				drm_dbg_kms(display->drm, "Port deauth failed.\n");
		}
	}

	if (!ret)
		ret = hdcp2_enable_stream_encryption(connector);

	return ret;
}
2066
/*
 * Enable HDCP2.2 on the connector.  Applies the HDMI line-rekey disable
 * workaround (WA 16022217614) before authentication and marks the link
 * encrypted on success.
 */
static int _intel_hdcp2_enable(struct intel_atomic_state *state,
			       struct intel_connector *connector)
{
	struct intel_display *display = to_intel_display(connector);
	struct intel_hdcp *hdcp = &connector->hdcp;
	int ret;

	drm_dbg_kms(display->drm, "[CONNECTOR:%d:%s] HDCP2.2 is being enabled. Type: %d\n",
		    connector->base.base.id, connector->base.name,
		    hdcp->content_type);

	intel_hdcp_disable_hdcp_line_rekeying(connector->encoder, hdcp);

	ret = hdcp2_authenticate_and_encrypt(state, connector);
	if (ret) {
		drm_dbg_kms(display->drm, "HDCP2 Type%d  Enabling Failed. (%d)\n",
			    hdcp->content_type, ret);
		return ret;
	}

	drm_dbg_kms(display->drm, "[CONNECTOR:%d:%s] HDCP2.2 is enabled. Type %d\n",
		    connector->base.base.id, connector->base.name,
		    hdcp->content_type);

	hdcp->hdcp2_encrypted = true;
	return 0;
}
2094
/*
 * Disable HDCP2.2: tear down per-stream encryption first (where the shim
 * supports it), then link encryption and the firmware session.  When other
 * streams on the same digital port still use the link, link encryption is
 * kept up unless this is a link-recovery teardown.
 */
static int
_intel_hdcp2_disable(struct intel_connector *connector, bool hdcp2_link_recovery)
{
	struct intel_display *display = to_intel_display(connector);
	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
	struct hdcp_port_data *data = &dig_port->hdcp_port_data;
	struct intel_hdcp *hdcp = &connector->hdcp;
	int ret;

	drm_dbg_kms(display->drm, "[CONNECTOR:%d:%s] HDCP2.2 is being Disabled\n",
		    connector->base.base.id, connector->base.name);

	if (hdcp->shim->stream_2_2_encryption) {
		ret = hdcp->shim->stream_2_2_encryption(connector, false);
		if (ret) {
			drm_err(display->drm, "[CONNECTOR:%d:%s] Failed to disable HDCP 2.2 stream enc\n",
				connector->base.base.id, connector->base.name);
			return ret;
		}
		drm_dbg_kms(display->drm, "HDCP 2.2 transcoder: %s stream encryption disabled\n",
			    transcoder_name(hdcp->stream_transcoder));

		/* Other streams still use the link; keep encryption up */
		if (dig_port->num_hdcp_streams > 0 && !hdcp2_link_recovery)
			return 0;
	}

	ret = hdcp2_disable_encryption(connector);

	if (hdcp2_deauthenticate_port(connector) < 0)
		drm_dbg_kms(display->drm, "Port deauth failed.\n");

	connector->hdcp.hdcp2_encrypted = false;
	dig_port->hdcp_auth_status = false;
	/* Reset the stream count in the firmware port data */
	data->k = 0;

	return ret;
}
2132
/*
 * Implements the Link Integrity Check for HDCP2.2.
 *
 * Confirms the hardware still reports encryption, then asks the shim about
 * link health.  A topology change triggers repeater re-authentication; any
 * other failure tears the whole HDCP2.2 session down and sets the property
 * back to DESIRED so it gets re-enabled.
 */
static int intel_hdcp2_check_link(struct intel_connector *connector)
{
	struct intel_display *display = to_intel_display(connector);
	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
	struct intel_hdcp *hdcp = &connector->hdcp;
	enum port port = dig_port->base.port;
	enum transcoder cpu_transcoder;
	int ret = 0;

	mutex_lock(&hdcp->mutex);
	mutex_lock(&dig_port->hdcp_mutex);
	cpu_transcoder = hdcp->cpu_transcoder;

	/* hdcp2_check_link is expected only when HDCP2.2 is Enabled */
	if (hdcp->value != DRM_MODE_CONTENT_PROTECTION_ENABLED ||
	    !hdcp->hdcp2_encrypted) {
		ret = -EINVAL;
		goto out;
	}

	if (drm_WARN_ON(display->drm,
			!intel_hdcp2_in_use(display, cpu_transcoder, port))) {
		drm_err(display->drm,
			"HDCP2.2 link stopped the encryption, %x\n",
			intel_de_read(display, HDCP2_STATUS(display, cpu_transcoder, port)));
		ret = -ENXIO;
		_intel_hdcp2_disable(connector, true);
		intel_hdcp_update_value(connector,
					DRM_MODE_CONTENT_PROTECTION_DESIRED,
					true);
		goto out;
	}

	ret = hdcp->shim->check_2_2_link(dig_port, connector);
	if (ret == HDCP_LINK_PROTECTED) {
		if (hdcp->value != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) {
			intel_hdcp_update_value(connector,
					DRM_MODE_CONTENT_PROTECTION_ENABLED,
					true);
		}
		goto out;
	}

	if (ret == HDCP_TOPOLOGY_CHANGE) {
		if (hdcp->value == DRM_MODE_CONTENT_PROTECTION_UNDESIRED)
			goto out;

		drm_dbg_kms(display->drm,
			    "HDCP2.2 Downstream topology change\n");

		ret = hdcp2_authenticate_repeater_topology(connector);
		if (!ret) {
			intel_hdcp_update_value(connector,
						DRM_MODE_CONTENT_PROTECTION_ENABLED,
						true);
			goto out;
		}

		drm_dbg_kms(display->drm,
			    "[CONNECTOR:%d:%s] Repeater topology auth failed.(%d)\n",
			    connector->base.base.id, connector->base.name,
			    ret);
	} else {
		drm_dbg_kms(display->drm,
			    "[CONNECTOR:%d:%s] HDCP2.2 link failed, retrying auth\n",
			    connector->base.base.id, connector->base.name);
	}

	/* Tear the session down and flag DESIRED for a fresh enable */
	ret = _intel_hdcp2_disable(connector, true);
	if (ret) {
		drm_err(display->drm,
			"[CONNECTOR:%d:%s] Failed to disable hdcp2.2 (%d)\n",
			connector->base.base.id, connector->base.name, ret);
		intel_hdcp_update_value(connector,
				DRM_MODE_CONTENT_PROTECTION_DESIRED, true);
		goto out;
	}

	intel_hdcp_update_value(connector,
				DRM_MODE_CONTENT_PROTECTION_DESIRED, true);
out:
	mutex_unlock(&dig_port->hdcp_mutex);
	mutex_unlock(&hdcp->mutex);
	return ret;
}
2219
/*
 * Periodic link-check worker: tries the HDCP2.2 check first and falls back
 * to the HDCP1.4 check; re-queues itself at the matching protocol's check
 * period while the corresponding check succeeds.  Stops silently once the
 * connector is unregistered.
 */
static void intel_hdcp_check_work(struct work_struct *work)
{
	struct intel_hdcp *hdcp = container_of(to_delayed_work(work),
					       struct intel_hdcp,
					       check_work);
	struct intel_connector *connector = intel_hdcp_to_connector(hdcp);
	struct intel_display *display = to_intel_display(connector);
	struct drm_i915_private *i915 = to_i915(display->drm);

	if (drm_connector_is_unregistered(&connector->base))
		return;

	if (!intel_hdcp2_check_link(connector))
		queue_delayed_work(i915->unordered_wq, &hdcp->check_work,
				   DRM_HDCP2_CHECK_PERIOD_MS);
	else if (!intel_hdcp_check_link(connector))
		queue_delayed_work(i915->unordered_wq, &hdcp->check_work,
				   DRM_HDCP_CHECK_PERIOD_MS);
}
2239
2240static int i915_hdcp_component_bind(struct device *drv_kdev,
2241				    struct device *mei_kdev, void *data)
2242{
2243	struct intel_display *display = to_intel_display(drv_kdev);
2244
2245	drm_dbg(display->drm, "I915 HDCP comp bind\n");
2246	mutex_lock(&display->hdcp.hdcp_mutex);
2247	display->hdcp.arbiter = (struct i915_hdcp_arbiter *)data;
2248	display->hdcp.arbiter->hdcp_dev = mei_kdev;
2249	mutex_unlock(&display->hdcp.hdcp_mutex);
2250
2251	return 0;
2252}
2253
/* Component unbind: the arbiter device is going away; stop handing it out. */
static void i915_hdcp_component_unbind(struct device *drv_kdev,
				       struct device *mei_kdev, void *data)
{
	struct intel_display *display = to_intel_display(drv_kdev);

	drm_dbg(display->drm, "I915 HDCP comp unbind\n");
	mutex_lock(&display->hdcp.hdcp_mutex);
	display->hdcp.arbiter = NULL;
	mutex_unlock(&display->hdcp.hdcp_mutex);
}
2264
/* Component ops tying i915 to the MEI HDCP firmware provider. */
static const struct component_ops i915_hdcp_ops = {
	.bind   = i915_hdcp_component_bind,
	.unbind = i915_hdcp_component_unbind,
};
2269
2270static enum hdcp_ddi intel_get_hdcp_ddi_index(enum port port)
2271{
2272	switch (port) {
2273	case PORT_A:
2274		return HDCP_DDI_A;
2275	case PORT_B ... PORT_F:
2276		return (enum hdcp_ddi)port;
2277	default:
2278		return HDCP_DDI_INVALID_PORT;
2279	}
2280}
2281
2282static enum hdcp_transcoder intel_get_hdcp_transcoder(enum transcoder cpu_transcoder)
2283{
2284	switch (cpu_transcoder) {
2285	case TRANSCODER_A ... TRANSCODER_D:
2286		return (enum hdcp_transcoder)(cpu_transcoder | 0x10);
2287	default: /* eDP, DSI TRANSCODERS are non HDCP capable */
2288		return HDCP_INVALID_TRANSCODER;
2289	}
2290}
2291
/*
 * Fill in the firmware-facing hdcp_port_data for this digital port and
 * allocate the per-pipe stream-id array (allocated once and reused across
 * enables).  Returns 0 on success or -ENOMEM.
 */
static int initialize_hdcp_port_data(struct intel_connector *connector,
				     struct intel_digital_port *dig_port,
				     const struct intel_hdcp_shim *shim)
{
	struct intel_display *display = to_intel_display(connector);
	struct hdcp_port_data *data = &dig_port->hdcp_port_data;
	enum port port = dig_port->base.port;

	if (DISPLAY_VER(display) < 12)
		data->hdcp_ddi = intel_get_hdcp_ddi_index(port);
	else
		/*
		 * As per ME FW API expectation, for GEN 12+, hdcp_ddi is filled
		 * with zero(INVALID PORT index).
		 */
		data->hdcp_ddi = HDCP_DDI_INVALID_PORT;

	/*
	 * As associated transcoder is set and modified at modeset, here hdcp_transcoder
	 * is initialized to zero (invalid transcoder index). This will be
	 * retained for <Gen12 forever.
	 */
	data->hdcp_transcoder = HDCP_INVALID_TRANSCODER;

	data->port_type = (u8)HDCP_PORT_TYPE_INTEGRATED;
	data->protocol = (u8)shim->protocol;

	/* Allocate only on the first init for this dig_port */
	if (!data->streams)
		data->streams = kcalloc(INTEL_NUM_PIPES(display),
					sizeof(struct hdcp2_streamid_type),
					GFP_KERNEL);
	if (!data->streams) {
		drm_err(display->drm, "Out of Memory\n");
		return -ENOMEM;
	}

	return 0;
}
2330
/*
 * HDCP2.2 needs a firmware agent: either the GSC command streamer path, or
 * the MEI HDCP component (DISPLAY_VER >= 10, plus KBL/CFL/CML).
 */
static bool is_hdcp2_supported(struct intel_display *display)
{
	struct drm_i915_private *i915 = to_i915(display->drm);

	if (intel_hdcp_gsc_cs_required(display))
		return true;

	if (!IS_ENABLED(CONFIG_INTEL_MEI_HDCP))
		return false;

	return (DISPLAY_VER(display) >= 10 ||
		IS_KABYLAKE(i915) ||
		IS_COFFEELAKE(i915) ||
		IS_COMETLAKE(i915));
}
2346
/*
 * Register the device-level HDCP firmware interface: GSC init where the
 * platform requires it, otherwise the MEI HDCP component.  On failure
 * comp_added is cleared again so intel_hdcp_component_fini() is a no-op.
 */
void intel_hdcp_component_init(struct intel_display *display)
{
	int ret;

	if (!is_hdcp2_supported(display))
		return;

	mutex_lock(&display->hdcp.hdcp_mutex);
	drm_WARN_ON(display->drm, display->hdcp.comp_added);

	display->hdcp.comp_added = true;
	mutex_unlock(&display->hdcp.hdcp_mutex);

	if (intel_hdcp_gsc_cs_required(display))
		ret = intel_hdcp_gsc_init(display);
	else
		ret = component_add_typed(display->drm->dev, &i915_hdcp_ops,
					  I915_COMPONENT_HDCP);

	if (ret < 0) {
		drm_dbg_kms(display->drm, "Failed at fw component add(%d)\n",
			    ret);
		mutex_lock(&display->hdcp.hdcp_mutex);
		display->hdcp.comp_added = false;
		mutex_unlock(&display->hdcp.hdcp_mutex);
		return;
	}
}
2374
/*
 * Per-connector HDCP2.2 init: set up the firmware port data and, on
 * success, mark the connector HDCP2.2 capable.  Failure only disables
 * HDCP2.2 (HDCP1.4 remains available).
 */
static void intel_hdcp2_init(struct intel_connector *connector,
			     struct intel_digital_port *dig_port,
			     const struct intel_hdcp_shim *shim)
{
	struct intel_display *display = to_intel_display(connector);
	struct intel_hdcp *hdcp = &connector->hdcp;
	int ret;

	ret = initialize_hdcp_port_data(connector, dig_port, shim);
	if (ret) {
		drm_dbg_kms(display->drm, "Mei hdcp data init failed\n");
		return;
	}

	hdcp->hdcp2_supported = true;
}
2391
2392int intel_hdcp_init(struct intel_connector *connector,
2393		    struct intel_digital_port *dig_port,
2394		    const struct intel_hdcp_shim *shim)
2395{
2396	struct intel_display *display = to_intel_display(connector);
2397	struct intel_hdcp *hdcp = &connector->hdcp;
2398	int ret;
2399
2400	if (!shim)
2401		return -EINVAL;
2402
2403	if (is_hdcp2_supported(display))
2404		intel_hdcp2_init(connector, dig_port, shim);
2405
2406	ret = drm_connector_attach_content_protection_property(&connector->base,
2407							       hdcp->hdcp2_supported);
 
2408	if (ret) {
2409		hdcp->hdcp2_supported = false;
2410		kfree(dig_port->hdcp_port_data.streams);
2411		return ret;
2412	}
2413
2414	hdcp->shim = shim;
2415	mutex_init(&hdcp->mutex);
2416	INIT_DELAYED_WORK(&hdcp->check_work, intel_hdcp_check_work);
2417	INIT_WORK(&hdcp->prop_work, intel_hdcp_prop_work);
2418	init_waitqueue_head(&hdcp->cp_irq_queue);
2419
2420	return 0;
2421}
2422
/*
 * Common HDCP enable path: record the transcoders in use, try HDCP2.2
 * first (preferred as the stronger protocol), fall back to HDCP1.4 when
 * the requested content type allows it, and on success arm the periodic
 * link check and flip the property to ENABLED.
 */
static int _intel_hdcp_enable(struct intel_atomic_state *state,
			      struct intel_encoder *encoder,
			      const struct intel_crtc_state *pipe_config,
			      const struct drm_connector_state *conn_state)
{
	struct intel_display *display = to_intel_display(encoder);
	struct drm_i915_private *i915 = to_i915(display->drm);
	struct intel_connector *connector =
		to_intel_connector(conn_state->connector);
	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
	struct intel_hdcp *hdcp = &connector->hdcp;
	unsigned long check_link_interval = DRM_HDCP_CHECK_PERIOD_MS;
	int ret = -EINVAL;

	if (!hdcp->shim)
		return -ENOENT;

	if (!connector->encoder) {
		drm_err(display->drm, "[CONNECTOR:%d:%s] encoder is not initialized\n",
			connector->base.base.id, connector->base.name);
		return -ENODEV;
	}

	mutex_lock(&hdcp->mutex);
	mutex_lock(&dig_port->hdcp_mutex);
	drm_WARN_ON(display->drm,
		    hdcp->value == DRM_MODE_CONTENT_PROTECTION_ENABLED);
	hdcp->content_type = (u8)conn_state->hdcp_content_type;

	if (intel_crtc_has_type(pipe_config, INTEL_OUTPUT_DP_MST)) {
		/* MST: HDCP runs on the MST master transcoder */
		hdcp->cpu_transcoder = pipe_config->mst_master_transcoder;
		hdcp->stream_transcoder = pipe_config->cpu_transcoder;
	} else {
		hdcp->cpu_transcoder = pipe_config->cpu_transcoder;
		hdcp->stream_transcoder = INVALID_TRANSCODER;
	}

	if (DISPLAY_VER(display) >= 12)
		dig_port->hdcp_port_data.hdcp_transcoder =
			intel_get_hdcp_transcoder(hdcp->cpu_transcoder);

	/*
	 * Considering that HDCP2.2 is more secure than HDCP1.4, If the setup
	 * is capable of HDCP2.2, it is preferred to use HDCP2.2.
	 */
	if (intel_hdcp2_get_capability(connector)) {
		ret = _intel_hdcp2_enable(state, connector);
		if (!ret)
			check_link_interval =
				DRM_HDCP2_CHECK_PERIOD_MS;
	}

	/*
	 * When HDCP2.2 fails and Content Type is not Type1, HDCP1.4 will
	 * be attempted.
	 */
	if (ret && intel_hdcp_get_capability(connector) &&
	    hdcp->content_type != DRM_MODE_HDCP_CONTENT_TYPE1) {
		ret = intel_hdcp1_enable(connector);
	}

	if (!ret) {
		queue_delayed_work(i915->unordered_wq, &hdcp->check_work,
				   check_link_interval);
		intel_hdcp_update_value(connector,
					DRM_MODE_CONTENT_PROTECTION_ENABLED,
					true);
	}

	mutex_unlock(&dig_port->hdcp_mutex);
	mutex_unlock(&hdcp->mutex);
	return ret;
}
2496
2497void intel_hdcp_enable(struct intel_atomic_state *state,
2498		       struct intel_encoder *encoder,
2499		       const struct intel_crtc_state *crtc_state,
2500		       const struct drm_connector_state *conn_state)
2501{
2502	struct intel_connector *connector =
2503		to_intel_connector(conn_state->connector);
2504	struct intel_hdcp *hdcp = &connector->hdcp;
2505
2506	/*
2507	 * Enable hdcp if it's desired or if userspace is enabled and
2508	 * driver set its state to undesired
2509	 */
2510	if (conn_state->content_protection ==
2511	    DRM_MODE_CONTENT_PROTECTION_DESIRED ||
2512	    (conn_state->content_protection ==
2513	    DRM_MODE_CONTENT_PROTECTION_ENABLED && hdcp->value ==
2514	    DRM_MODE_CONTENT_PROTECTION_UNDESIRED))
2515		_intel_hdcp_enable(state, encoder, crtc_state, conn_state);
2516}
2517
/*
 * Disable HDCP on the connector (2.2 or 1.4, whichever is currently
 * encrypting), mark the property UNDESIRED and cancel the periodic link
 * check.  Returns 0 if already UNDESIRED or on success.
 */
int intel_hdcp_disable(struct intel_connector *connector)
{
	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
	struct intel_hdcp *hdcp = &connector->hdcp;
	int ret = 0;

	if (!hdcp->shim)
		return -ENOENT;

	mutex_lock(&hdcp->mutex);
	mutex_lock(&dig_port->hdcp_mutex);

	if (hdcp->value == DRM_MODE_CONTENT_PROTECTION_UNDESIRED)
		goto out;

	intel_hdcp_update_value(connector,
				DRM_MODE_CONTENT_PROTECTION_UNDESIRED, false);
	if (hdcp->hdcp2_encrypted)
		ret = _intel_hdcp2_disable(connector, false);
	else if (hdcp->hdcp_encrypted)
		ret = _intel_hdcp_disable(connector);

out:
	mutex_unlock(&dig_port->hdcp_mutex);
	mutex_unlock(&hdcp->mutex);
	/* Outside the locks: the check worker takes hdcp->mutex itself */
	cancel_delayed_work_sync(&hdcp->check_work);
	return ret;
}
2546
/*
 * Re-evaluate HDCP across a modeset / property update: disable on
 * UNDESIRED or on a content-type change, then re-enable when the property
 * is DESIRED and HDCP isn't already up.  prop_work pushes the resulting
 * property value to userspace; the connector reference taken before
 * queueing is dropped again if the queue attempt fails.
 */
void intel_hdcp_update_pipe(struct intel_atomic_state *state,
			    struct intel_encoder *encoder,
			    const struct intel_crtc_state *crtc_state,
			    const struct drm_connector_state *conn_state)
{
	struct intel_connector *connector =
				to_intel_connector(conn_state->connector);
	struct intel_hdcp *hdcp = &connector->hdcp;
	bool content_protection_type_changed, desired_and_not_enabled = false;
	struct drm_i915_private *i915 = to_i915(connector->base.dev);

	if (!connector->hdcp.shim)
		return;

	content_protection_type_changed =
		(conn_state->hdcp_content_type != hdcp->content_type &&
		 conn_state->content_protection !=
		 DRM_MODE_CONTENT_PROTECTION_UNDESIRED);

	/*
	 * During the HDCP encryption session if Type change is requested,
	 * disable the HDCP and reenable it with new TYPE value.
	 */
	if (conn_state->content_protection ==
	    DRM_MODE_CONTENT_PROTECTION_UNDESIRED ||
	    content_protection_type_changed)
		intel_hdcp_disable(connector);

	/*
	 * Mark the hdcp state as DESIRED after the hdcp disable of type
	 * change procedure.
	 */
	if (content_protection_type_changed) {
		mutex_lock(&hdcp->mutex);
		hdcp->value = DRM_MODE_CONTENT_PROTECTION_DESIRED;
		drm_connector_get(&connector->base);
		if (!queue_work(i915->unordered_wq, &hdcp->prop_work))
			drm_connector_put(&connector->base);
		mutex_unlock(&hdcp->mutex);
	}

	if (conn_state->content_protection ==
	    DRM_MODE_CONTENT_PROTECTION_DESIRED) {
		mutex_lock(&hdcp->mutex);
		/* Avoid enabling hdcp, if it already ENABLED */
		desired_and_not_enabled =
			hdcp->value != DRM_MODE_CONTENT_PROTECTION_ENABLED;
		mutex_unlock(&hdcp->mutex);
		/*
		 * If HDCP already ENABLED and CP property is DESIRED, schedule
		 * prop_work to update correct CP property to user space.
		 */
		if (!desired_and_not_enabled && !content_protection_type_changed) {
			drm_connector_get(&connector->base);
			if (!queue_work(i915->unordered_wq, &hdcp->prop_work))
				drm_connector_put(&connector->base);
		}
	}

	if (desired_and_not_enabled || content_protection_type_changed)
		_intel_hdcp_enable(state, encoder, crtc_state, conn_state);
}
2610
/*
 * Tear down the HDCP firmware interface registered by
 * intel_hdcp_component_init().  Safe to call when init failed or was
 * skipped: bails out when comp_added is not set.
 */
void intel_hdcp_component_fini(struct intel_display *display)
{
	mutex_lock(&display->hdcp.hdcp_mutex);
	if (!display->hdcp.comp_added) {
		mutex_unlock(&display->hdcp.hdcp_mutex);
		return;
	}

	display->hdcp.comp_added = false;
	mutex_unlock(&display->hdcp.hdcp_mutex);

	if (intel_hdcp_gsc_cs_required(display))
		intel_hdcp_gsc_fini(display);
	else
		component_del(display->drm->dev, &i915_hdcp_ops);
}
2627
/*
 * Destroy-path teardown of per-connector HDCP state: stop check_work and
 * detach the shim so no further HDCP operations can run.  Must only be
 * called after the connector has been unregistered.
 */
void intel_hdcp_cleanup(struct intel_connector *connector)
{
	struct intel_hdcp *hdcp = &connector->hdcp;

	if (!hdcp->shim)
		return;

	/*
	 * If the connector is registered, it's possible userspace could kick
	 * off another HDCP enable, which would re-spawn the workers.
	 */
	drm_WARN_ON(connector->base.dev,
		connector->base.registration_state == DRM_CONNECTOR_REGISTERED);

	/*
	 * Now that the connector is not registered, check_work won't be run,
	 * but cancel any outstanding instances of it
	 */
	cancel_delayed_work_sync(&hdcp->check_work);

	/*
	 * We don't cancel prop_work in the same way as check_work since it
	 * requires connection_mutex which could be held while calling this
	 * function. Instead, we rely on the connector references grabbed before
	 * scheduling prop_work to ensure the connector is alive when prop_work
	 * is run. So if we're in the destroy path (which is where this
	 * function should be called), we're "guaranteed" that prop_work is not
	 * active (tl;dr This Should Never Happen).
	 */
	drm_WARN_ON(connector->base.dev, work_pending(&hdcp->prop_work));

	/* Detaching the shim blocks any further HDCP operations */
	mutex_lock(&hdcp->mutex);
	hdcp->shim = NULL;
	mutex_unlock(&hdcp->mutex);
}
2663
/*
 * Atomic-check fixups for the content protection property: demote ENABLED
 * back to DESIRED when the connector is being disabled or a modeset will
 * drop encryption, and force a modeset (mode_changed) whenever the
 * property or content-type change needs the full enable/disable path.
 */
void intel_hdcp_atomic_check(struct drm_connector *connector,
			     struct drm_connector_state *old_state,
			     struct drm_connector_state *new_state)
{
	u64 old_cp = old_state->content_protection;
	u64 new_cp = new_state->content_protection;
	struct drm_crtc_state *crtc_state;

	if (!new_state->crtc) {
		/*
		 * If the connector is being disabled with CP enabled, mark it
		 * desired so it's re-enabled when the connector is brought back
		 */
		if (old_cp == DRM_MODE_CONTENT_PROTECTION_ENABLED)
			new_state->content_protection =
				DRM_MODE_CONTENT_PROTECTION_DESIRED;
		return;
	}

	crtc_state = drm_atomic_get_new_crtc_state(new_state->state,
						   new_state->crtc);
	/*
	 * Fix the HDCP uapi content protection state in case of modeset.
	 * FIXME: As per HDCP content protection property uapi doc, an uevent()
	 * need to be sent if there is transition from ENABLED->DESIRED.
	 */
	if (drm_atomic_crtc_needs_modeset(crtc_state) &&
	    (old_cp == DRM_MODE_CONTENT_PROTECTION_ENABLED &&
	    new_cp != DRM_MODE_CONTENT_PROTECTION_UNDESIRED))
		new_state->content_protection =
			DRM_MODE_CONTENT_PROTECTION_DESIRED;

	/*
	 * Nothing to do if the state didn't change, or HDCP was activated since
	 * the last commit. And also no change in hdcp content type.
	 */
	if (old_cp == new_cp ||
	    (old_cp == DRM_MODE_CONTENT_PROTECTION_DESIRED &&
	     new_cp == DRM_MODE_CONTENT_PROTECTION_ENABLED)) {
		if (old_state->hdcp_content_type ==
				new_state->hdcp_content_type)
			return;
	}

	crtc_state->mode_changed = true;
}
2710
/*
 * Handles the CP_IRQ raised from the DP HDCP sink: bump the CP_IRQ count,
 * wake any authentication step waiting on cp_irq_queue, and kick an
 * immediate link check.
 */
void intel_hdcp_handle_cp_irq(struct intel_connector *connector)
{
	struct intel_hdcp *hdcp = &connector->hdcp;
	struct intel_display *display = to_intel_display(connector);
	struct drm_i915_private *i915 = to_i915(display->drm);

	if (!hdcp->shim)
		return;

	atomic_inc(&connector->hdcp.cp_irq_count);
	wake_up_all(&connector->hdcp.cp_irq_queue);

	/* Delay 0: run the link check right away */
	queue_delayed_work(i915->unordered_wq, &hdcp->check_work, 0);
}
/* ===== Archived revision (v5.9) of this file follows for reference ===== */
   1/* SPDX-License-Identifier: MIT */
   2/*
   3 * Copyright (C) 2017 Google, Inc.
   4 * Copyright © 2017-2019, Intel Corporation.
   5 *
   6 * Authors:
   7 * Sean Paul <seanpaul@chromium.org>
   8 * Ramalingam C <ramalingam.c@intel.com>
   9 */
  10
  11#include <linux/component.h>
  12#include <linux/i2c.h>
  13#include <linux/random.h>
  14
  15#include <drm/drm_hdcp.h>
  16#include <drm/i915_component.h>
  17
 
  18#include "i915_reg.h"
 
 
  19#include "intel_display_power.h"
 
  20#include "intel_display_types.h"
  21#include "intel_hdcp.h"
  22#include "intel_sideband.h"
  23#include "intel_connector.h"
 
 
  24
  25#define KEY_LOAD_TRIES	5
  26#define ENCRYPT_STATUS_CHANGE_TIMEOUT_MS	50
  27#define HDCP2_LC_RETRY_CNT			3
  28
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
  29static
  30bool intel_hdcp_is_ksv_valid(u8 *ksv)
  31{
  32	int i, ones = 0;
  33	/* KSV has 20 1's and 20 0's */
  34	for (i = 0; i < DRM_HDCP_KSV_LEN; i++)
  35		ones += hweight8(ksv[i]);
  36	if (ones != 20)
  37		return false;
  38
  39	return true;
  40}
  41
/*
 * Read the sink's Bksv via the shim, retrying once when the value read is
 * not a valid KSV.  Returns 0 with a valid bksv filled in, a shim read
 * error, or -ENODEV when both reads produced invalid KSVs.
 */
static
int intel_hdcp_read_valid_bksv(struct intel_digital_port *dig_port,
			       const struct intel_hdcp_shim *shim, u8 *bksv)
{
	struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
	int ret, i, tries = 2;

	/* HDCP spec states that we must retry the bksv if it is invalid */
	for (i = 0; i < tries; i++) {
		ret = shim->read_bksv(dig_port, bksv);
		if (ret)
			return ret;
		if (intel_hdcp_is_ksv_valid(bksv))
			break;
	}
	if (i == tries) {
		drm_dbg_kms(&i915->drm, "Bksv is invalid\n");
		return -ENODEV;
	}

	return 0;
}
  64
/* Is HDCP1.4 capable on Platform and Sink */
bool intel_hdcp_capable(struct intel_connector *connector)
{
	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
	const struct intel_hdcp_shim *shim = connector->hdcp.shim;
	bool capable = false;
	u8 bksv[5];

	if (!shim)
		return capable;

	if (shim->hdcp_capable) {
		shim->hdcp_capable(dig_port, &capable);
	} else {
		/* No dedicated hook: a readable, valid Bksv implies capability */
		if (!intel_hdcp_read_valid_bksv(dig_port, shim, bksv))
			capable = true;
	}

	return capable;
}
  85
/* Is HDCP2.2 capable on Platform and Sink */
bool intel_hdcp2_capable(struct intel_connector *connector)
{
	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
	struct intel_hdcp *hdcp = &connector->hdcp;
	bool capable = false;

	/* I915 support for HDCP2.2 */
	if (!hdcp->hdcp2_supported)
		return false;

	/* MEI interface is solid */
	mutex_lock(&dev_priv->hdcp_comp_mutex);
	if (!dev_priv->hdcp_comp_added ||  !dev_priv->hdcp_master) {
		mutex_unlock(&dev_priv->hdcp_comp_mutex);
		return false;
	}
	mutex_unlock(&dev_priv->hdcp_comp_mutex);

	/* Sink's capability for HDCP2.2 */
	hdcp->shim->hdcp_2_2_capable(dig_port, &capable);

	return capable;
}
 111
/* Is HDCP1.4 encryption currently active on this transcoder/port? */
static bool intel_hdcp_in_use(struct drm_i915_private *dev_priv,
			      enum transcoder cpu_transcoder, enum port port)
{
	return intel_de_read(dev_priv,
			     HDCP_STATUS(dev_priv, cpu_transcoder, port)) &
	       HDCP_STATUS_ENC;
}
 119
/* Is HDCP2.2 link encryption currently active on this transcoder/port? */
static bool intel_hdcp2_in_use(struct drm_i915_private *dev_priv,
			       enum transcoder cpu_transcoder, enum port port)
{
	return intel_de_read(dev_priv,
			     HDCP2_STATUS(dev_priv, cpu_transcoder, port)) &
	       LINK_ENCRYPTION_STATUS;
}
 127
/*
 * Poll the sink, via the shim, until it reports its KSV list ready.
 * Returns 0 on ready, the shim's read error, or -ETIMEDOUT after the
 * spec-allowed 5 seconds.
 */
static int intel_hdcp_poll_ksv_fifo(struct intel_digital_port *dig_port,
				    const struct intel_hdcp_shim *shim)
{
	int ret, read_ret;
	bool ksv_ready;

	/* Poll for ksv list ready (spec says max time allowed is 5s) */
	ret = __wait_for(read_ret = shim->read_ksv_ready(dig_port,
							 &ksv_ready),
			 read_ret || ksv_ready, 5 * 1000 * 1000, 1000,
			 100 * 1000);
	if (ret)
		return ret;
	if (read_ret)
		return read_ret;
	if (!ksv_ready)
		return -ETIMEDOUT;

	return 0;
}
 148
/*
 * Report whether the HDCP keys can be loaded right now, i.e. whether the
 * platform's relevant power well (PW#1, or the global display power well
 * on HSW/BDW) is currently enabled.
 */
static bool hdcp_key_loadable(struct drm_i915_private *dev_priv)
{
	struct i915_power_domains *power_domains = &dev_priv->power_domains;
	struct i915_power_well *power_well;
	enum i915_power_well_id id;
	bool enabled = false;

	/*
	 * On HSW and BDW, Display HW loads the Key as soon as Display resumes.
	 * On all BXT+, SW can load the keys only when the PW#1 is turned on.
	 */
	if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
		id = HSW_DISP_PW_GLOBAL;
	else
		id = SKL_DISP_PW_1;

	mutex_lock(&power_domains->lock);

	/* PG1 (power well #1) needs to be enabled */
	for_each_power_well(dev_priv, power_well) {
		if (power_well->desc->id == id) {
			enabled = power_well->desc->ops->is_enabled(dev_priv,
								    power_well);
			break;
		}
	}
	mutex_unlock(&power_domains->lock);

	/*
	 * Another req for hdcp key loadability is enabled state of pll for
	 * cdclk. Without active crtc we wont land here. So we are assuming that
	 * cdclk is already on.
	 */

	return enabled;
}
 185
/*
 * Trigger a clear of the loaded HDCP keys and write back all key-status /
 * fuse bits (presumably write-to-clear — verify against bspec).
 */
static void intel_hdcp_clear_keys(struct drm_i915_private *dev_priv)
{
	intel_de_write(dev_priv, HDCP_KEY_CONF, HDCP_CLEAR_KEYS_TRIGGER);
	intel_de_write(dev_priv, HDCP_KEY_STATUS,
		       HDCP_KEY_LOAD_DONE | HDCP_KEY_LOAD_STATUS | HDCP_FUSE_IN_PROGRESS | HDCP_FUSE_ERROR | HDCP_FUSE_DONE);
}
 192
/*
 * Load the HDCP1.4 keys from fuses.  Fast-path return when already loaded;
 * on HSW/BDW hardware loads the keys itself at display reset, so a missing
 * key there is -ENXIO.  GEN9_BC triggers the load via the GT mailbox
 * (pcode), all other platforms via HDCP_KEY_CONF.
 */
static int intel_hdcp_load_keys(struct drm_i915_private *dev_priv)
{
	int ret;
	u32 val;

	val = intel_de_read(dev_priv, HDCP_KEY_STATUS);
	if ((val & HDCP_KEY_LOAD_DONE) && (val & HDCP_KEY_LOAD_STATUS))
		return 0;

	/*
	 * On HSW and BDW HW loads the HDCP1.4 Key when Display comes
	 * out of reset. So if Key is not already loaded, its an error state.
	 */
	if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
		if (!(intel_de_read(dev_priv, HDCP_KEY_STATUS) & HDCP_KEY_LOAD_DONE))
			return -ENXIO;

	/*
	 * Initiate loading the HDCP key from fuses.
	 *
	 * BXT+ platforms, HDCP key needs to be loaded by SW. Only Gen 9
	 * platforms except BXT and GLK, differ in the key load trigger process
	 * from other platforms. So GEN9_BC uses the GT Driver Mailbox i/f.
	 */
	if (IS_GEN9_BC(dev_priv)) {
		ret = sandybridge_pcode_write(dev_priv,
					      SKL_PCODE_LOAD_HDCP_KEYS, 1);
		if (ret) {
			drm_err(&dev_priv->drm,
				"Failed to initiate HDCP key load (%d)\n",
				ret);
			return ret;
		}
	} else {
		intel_de_write(dev_priv, HDCP_KEY_CONF, HDCP_KEY_LOAD_TRIGGER);
	}

	/* Wait for the keys to load (500us) */
	ret = __intel_wait_for_register(&dev_priv->uncore, HDCP_KEY_STATUS,
					HDCP_KEY_LOAD_DONE, HDCP_KEY_LOAD_DONE,
					10, 1, &val);
	if (ret)
		return ret;
	else if (!(val & HDCP_KEY_LOAD_STATUS))
		return -ENXIO;

	/* Send Aksv over to PCH display for use in authentication */
	intel_de_write(dev_priv, HDCP_KEY_CONF, HDCP_AKSV_SEND_TRIGGER);

	return 0;
}
 244
 245/* Returns updated SHA-1 index */
 246static int intel_write_sha_text(struct drm_i915_private *dev_priv, u32 sha_text)
 247{
 248	intel_de_write(dev_priv, HDCP_SHA_TEXT, sha_text);
 249	if (intel_de_wait_for_set(dev_priv, HDCP_REP_CTL, HDCP_SHA1_READY, 1)) {
 250		drm_err(&dev_priv->drm, "Timed out waiting for SHA1 ready\n");
 251		return -ETIMEDOUT;
 252	}
 253	return 0;
 254}
 255
/*
 * Return the HDCP_REP_CTL bits (REP_PRESENT | SHA1_M0 select) for the
 * transcoder (gen12+) or DDI port (pre-gen12) carrying the HDCP stream.
 *
 * NOTE(review): on an unknown transcoder/port this returns -EINVAL
 * through a u32 return type, so callers see 0xffffffea rather than a
 * negative error — callers appear to use the value only as a register
 * mask; confirm none of them test it for < 0.
 */
static
u32 intel_hdcp_get_repeater_ctl(struct drm_i915_private *dev_priv,
				enum transcoder cpu_transcoder, enum port port)
{
	/* Gen12+ selects by transcoder rather than by DDI port. */
	if (INTEL_GEN(dev_priv) >= 12) {
		switch (cpu_transcoder) {
		case TRANSCODER_A:
			return HDCP_TRANSA_REP_PRESENT |
			       HDCP_TRANSA_SHA1_M0;
		case TRANSCODER_B:
			return HDCP_TRANSB_REP_PRESENT |
			       HDCP_TRANSB_SHA1_M0;
		case TRANSCODER_C:
			return HDCP_TRANSC_REP_PRESENT |
			       HDCP_TRANSC_SHA1_M0;
		case TRANSCODER_D:
			return HDCP_TRANSD_REP_PRESENT |
			       HDCP_TRANSD_SHA1_M0;
		default:
			drm_err(&dev_priv->drm, "Unknown transcoder %d\n",
				cpu_transcoder);
			return -EINVAL;
		}
	}

	switch (port) {
	case PORT_A:
		return HDCP_DDIA_REP_PRESENT | HDCP_DDIA_SHA1_M0;
	case PORT_B:
		return HDCP_DDIB_REP_PRESENT | HDCP_DDIB_SHA1_M0;
	case PORT_C:
		return HDCP_DDIC_REP_PRESENT | HDCP_DDIC_SHA1_M0;
	case PORT_D:
		return HDCP_DDID_REP_PRESENT | HDCP_DDID_SHA1_M0;
	case PORT_E:
		return HDCP_DDIE_REP_PRESENT | HDCP_DDIE_SHA1_M0;
	default:
		drm_err(&dev_priv->drm, "Unknown port %d\n", port);
		return -EINVAL;
	}
}
 297
/*
 * Validate the repeater's V' by streaming the KSV list, BINFO/BSTATUS
 * and M0 through the HW SHA-1 engine and comparing against the V'
 * values read from the receiver.
 *
 * Returns 0 when HW reports a SHA-1 match, -ETIMEDOUT on HW timeouts,
 * -ENXIO on mismatch, or a shim read error.
 */
static
int intel_hdcp_validate_v_prime(struct intel_connector *connector,
				const struct intel_hdcp_shim *shim,
				u8 *ksv_fifo, u8 num_downstream, u8 *bstatus)
{

	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
	enum transcoder cpu_transcoder = connector->hdcp.cpu_transcoder;
	enum port port = dig_port->base.port;
	u32 vprime, sha_text, sha_leftovers, rep_ctl;
	int ret, i, j, sha_idx;

	/* Process V' values from the receiver */
	for (i = 0; i < DRM_HDCP_V_PRIME_NUM_PARTS; i++) {
		ret = shim->read_v_prime_part(dig_port, i, &vprime);
		if (ret)
			return ret;
		intel_de_write(dev_priv, HDCP_SHA_V_PRIME(i), vprime);
	}

	/*
	 * We need to write the concatenation of all device KSVs, BINFO (DP) ||
	 * BSTATUS (HDMI), and M0 (which is added via HDCP_REP_CTL). This byte
	 * stream is written via the HDCP_SHA_TEXT register in 32-bit
	 * increments. Every 64 bytes, we need to write HDCP_REP_CTL again. This
	 * index will keep track of our progress through the 64 bytes as well as
	 * helping us work the 40-bit KSVs through our 32-bit register.
	 *
	 * NOTE: data passed via HDCP_SHA_TEXT should be big-endian
	 */
	sha_idx = 0;
	sha_text = 0;
	sha_leftovers = 0;	/* bytes of the current KSV not yet written */
	rep_ctl = intel_hdcp_get_repeater_ctl(dev_priv, cpu_transcoder, port);
	intel_de_write(dev_priv, HDCP_REP_CTL, rep_ctl | HDCP_SHA1_TEXT_32);
	for (i = 0; i < num_downstream; i++) {
		unsigned int sha_empty;
		u8 *ksv = &ksv_fifo[i * DRM_HDCP_KSV_LEN];

		/* Fill up the empty slots in sha_text and write it out */
		sha_empty = sizeof(sha_text) - sha_leftovers;
		for (j = 0; j < sha_empty; j++) {
			/* pack bytes MSB-first (big-endian) into sha_text */
			u8 off = ((sizeof(sha_text) - j - 1 - sha_leftovers) * 8);
			sha_text |= ksv[j] << off;
		}

		ret = intel_write_sha_text(dev_priv, sha_text);
		if (ret < 0)
			return ret;

		/* Programming guide writes this every 64 bytes */
		sha_idx += sizeof(sha_text);
		if (!(sha_idx % 64))
			intel_de_write(dev_priv, HDCP_REP_CTL,
				       rep_ctl | HDCP_SHA1_TEXT_32);

		/* Store the leftover bytes from the ksv in sha_text */
		sha_leftovers = DRM_HDCP_KSV_LEN - sha_empty;
		sha_text = 0;
		for (j = 0; j < sha_leftovers; j++)
			sha_text |= ksv[sha_empty + j] <<
					((sizeof(sha_text) - j - 1) * 8);

		/*
		 * If we still have room in sha_text for more data, continue.
		 * Otherwise, write it out immediately.
		 */
		if (sizeof(sha_text) > sha_leftovers)
			continue;

		ret = intel_write_sha_text(dev_priv, sha_text);
		if (ret < 0)
			return ret;
		sha_leftovers = 0;
		sha_text = 0;
		sha_idx += sizeof(sha_text);
	}

	/*
	 * We need to write BINFO/BSTATUS, and M0 now. Depending on how many
	 * bytes are leftover from the last ksv, we might be able to fit them
	 * all in sha_text (first 2 cases), or we might need to split them up
	 * into 2 writes (last 2 cases).
	 */
	if (sha_leftovers == 0) {
		/* Write 16 bits of text, 16 bits of M0 */
		intel_de_write(dev_priv, HDCP_REP_CTL,
			       rep_ctl | HDCP_SHA1_TEXT_16);
		ret = intel_write_sha_text(dev_priv,
					   bstatus[0] << 8 | bstatus[1]);
		if (ret < 0)
			return ret;
		sha_idx += sizeof(sha_text);

		/* Write 32 bits of M0 */
		intel_de_write(dev_priv, HDCP_REP_CTL,
			       rep_ctl | HDCP_SHA1_TEXT_0);
		ret = intel_write_sha_text(dev_priv, 0);
		if (ret < 0)
			return ret;
		sha_idx += sizeof(sha_text);

		/* Write 16 bits of M0 */
		intel_de_write(dev_priv, HDCP_REP_CTL,
			       rep_ctl | HDCP_SHA1_TEXT_16);
		ret = intel_write_sha_text(dev_priv, 0);
		if (ret < 0)
			return ret;
		sha_idx += sizeof(sha_text);

	} else if (sha_leftovers == 1) {
		/* Write 24 bits of text, 8 bits of M0 */
		intel_de_write(dev_priv, HDCP_REP_CTL,
			       rep_ctl | HDCP_SHA1_TEXT_24);
		sha_text |= bstatus[0] << 16 | bstatus[1] << 8;
		/* Only 24-bits of data, must be in the LSB */
		sha_text = (sha_text & 0xffffff00) >> 8;
		ret = intel_write_sha_text(dev_priv, sha_text);
		if (ret < 0)
			return ret;
		sha_idx += sizeof(sha_text);

		/* Write 32 bits of M0 */
		intel_de_write(dev_priv, HDCP_REP_CTL,
			       rep_ctl | HDCP_SHA1_TEXT_0);
		ret = intel_write_sha_text(dev_priv, 0);
		if (ret < 0)
			return ret;
		sha_idx += sizeof(sha_text);

		/* Write 24 bits of M0 */
		intel_de_write(dev_priv, HDCP_REP_CTL,
			       rep_ctl | HDCP_SHA1_TEXT_8);
		ret = intel_write_sha_text(dev_priv, 0);
		if (ret < 0)
			return ret;
		sha_idx += sizeof(sha_text);

	} else if (sha_leftovers == 2) {
		/* Write 32 bits of text */
		intel_de_write(dev_priv, HDCP_REP_CTL,
			       rep_ctl | HDCP_SHA1_TEXT_32);
		sha_text |= bstatus[0] << 8 | bstatus[1];
		ret = intel_write_sha_text(dev_priv, sha_text);
		if (ret < 0)
			return ret;
		sha_idx += sizeof(sha_text);

		/* Write 64 bits of M0 */
		intel_de_write(dev_priv, HDCP_REP_CTL,
			       rep_ctl | HDCP_SHA1_TEXT_0);
		for (i = 0; i < 2; i++) {
			ret = intel_write_sha_text(dev_priv, 0);
			if (ret < 0)
				return ret;
			sha_idx += sizeof(sha_text);
		}

		/*
		 * Terminate the SHA-1 stream by hand. For the other leftover
		 * cases this is appended by the hardware.
		 */
		intel_de_write(dev_priv, HDCP_REP_CTL,
			       rep_ctl | HDCP_SHA1_TEXT_32);
		sha_text = DRM_HDCP_SHA1_TERMINATOR << 24;
		ret = intel_write_sha_text(dev_priv, sha_text);
		if (ret < 0)
			return ret;
		sha_idx += sizeof(sha_text);
	} else if (sha_leftovers == 3) {
		/* Write 32 bits of text (filled from LSB) */
		intel_de_write(dev_priv, HDCP_REP_CTL,
			       rep_ctl | HDCP_SHA1_TEXT_32);
		sha_text |= bstatus[0];
		ret = intel_write_sha_text(dev_priv, sha_text);
		if (ret < 0)
			return ret;
		sha_idx += sizeof(sha_text);

		/* Write 8 bits of text (filled from LSB), 24 bits of M0 */
		intel_de_write(dev_priv, HDCP_REP_CTL,
			       rep_ctl | HDCP_SHA1_TEXT_8);
		ret = intel_write_sha_text(dev_priv, bstatus[1]);
		if (ret < 0)
			return ret;
		sha_idx += sizeof(sha_text);

		/* Write 32 bits of M0 */
		intel_de_write(dev_priv, HDCP_REP_CTL,
			       rep_ctl | HDCP_SHA1_TEXT_0);
		ret = intel_write_sha_text(dev_priv, 0);
		if (ret < 0)
			return ret;
		sha_idx += sizeof(sha_text);

		/* Write 8 bits of M0 */
		intel_de_write(dev_priv, HDCP_REP_CTL,
			       rep_ctl | HDCP_SHA1_TEXT_24);
		ret = intel_write_sha_text(dev_priv, 0);
		if (ret < 0)
			return ret;
		sha_idx += sizeof(sha_text);
	} else {
		/* sha_leftovers can only be 0..3 for a 4-byte register */
		drm_dbg_kms(&dev_priv->drm, "Invalid number of leftovers %d\n",
			    sha_leftovers);
		return -EINVAL;
	}

	intel_de_write(dev_priv, HDCP_REP_CTL, rep_ctl | HDCP_SHA1_TEXT_32);
	/* Fill up to 64-4 bytes with zeros (leave the last write for length) */
	while ((sha_idx % 64) < (64 - sizeof(sha_text))) {
		ret = intel_write_sha_text(dev_priv, 0);
		if (ret < 0)
			return ret;
		sha_idx += sizeof(sha_text);
	}

	/*
	 * Last write gets the length of the concatenation in bits. That is:
	 *  - 5 bytes per device
	 *  - 10 bytes for BINFO/BSTATUS(2), M0(8)
	 */
	sha_text = (num_downstream * 5 + 10) * 8;
	ret = intel_write_sha_text(dev_priv, sha_text);
	if (ret < 0)
		return ret;

	/* Tell the HW we're done with the hash and wait for it to ACK */
	intel_de_write(dev_priv, HDCP_REP_CTL,
		       rep_ctl | HDCP_SHA1_COMPLETE_HASH);
	if (intel_de_wait_for_set(dev_priv, HDCP_REP_CTL,
				  HDCP_SHA1_COMPLETE, 1)) {
		drm_err(&dev_priv->drm, "Timed out waiting for SHA1 complete\n");
		return -ETIMEDOUT;
	}
	if (!(intel_de_read(dev_priv, HDCP_REP_CTL) & HDCP_SHA1_V_MATCH)) {
		drm_dbg_kms(&dev_priv->drm, "SHA-1 mismatch, HDCP failed\n");
		return -ENXIO;
	}

	return 0;
}
 540
/* Implements Part 2 of the HDCP authorization procedure */
static
int intel_hdcp_auth_downstream(struct intel_connector *connector)
{

	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
	const struct intel_hdcp_shim *shim = connector->hdcp.shim;
	u8 bstatus[2], num_downstream, *ksv_fifo;
	int ret, i, tries = 3;

	/* Wait for the repeater to assemble its downstream KSV list. */
	ret = intel_hdcp_poll_ksv_fifo(dig_port, shim);
	if (ret) {
		drm_dbg_kms(&dev_priv->drm,
			    "KSV list failed to become ready (%d)\n", ret);
		return ret;
	}

	ret = shim->read_bstatus(dig_port, bstatus);
	if (ret)
		return ret;

	/* Reject topologies exceeding the HDCP 1.4 device/cascade limits. */
	if (DRM_HDCP_MAX_DEVICE_EXCEEDED(bstatus[0]) ||
	    DRM_HDCP_MAX_CASCADE_EXCEEDED(bstatus[1])) {
		drm_dbg_kms(&dev_priv->drm, "Max Topology Limit Exceeded\n");
		return -EPERM;
	}

	/*
	 * When repeater reports 0 device count, HDCP1.4 spec allows disabling
	 * the HDCP encryption. That implies that repeater can't have its own
	 * display. As there is no consumption of encrypted content in the
	 * repeater with 0 downstream devices, we are failing the
	 * authentication.
	 */
	num_downstream = DRM_HDCP_NUM_DOWNSTREAM(bstatus[0]);
	if (num_downstream == 0) {
		drm_dbg_kms(&dev_priv->drm,
			    "Repeater with zero downstream devices\n");
		return -EINVAL;
	}

	/* One 5-byte KSV per downstream device. */
	ksv_fifo = kcalloc(DRM_HDCP_KSV_LEN, num_downstream, GFP_KERNEL);
	if (!ksv_fifo) {
		drm_dbg_kms(&dev_priv->drm, "Out of mem: ksv_fifo\n");
		return -ENOMEM;
	}

	ret = shim->read_ksv_fifo(dig_port, num_downstream, ksv_fifo);
	if (ret)
		goto err;

	/* Fail authentication if any downstream KSV is on the SRM. */
	if (drm_hdcp_check_ksvs_revoked(&dev_priv->drm, ksv_fifo,
					num_downstream) > 0) {
		drm_err(&dev_priv->drm, "Revoked Ksv(s) in ksv_fifo\n");
		ret = -EPERM;
		goto err;
	}

	/*
	 * When V prime mismatches, DP Spec mandates re-read of
	 * V prime atleast twice.
	 */
	for (i = 0; i < tries; i++) {
		ret = intel_hdcp_validate_v_prime(connector, shim,
						  ksv_fifo, num_downstream,
						  bstatus);
		if (!ret)
			break;
	}

	/* ret holds the last validation error when all tries failed. */
	if (i == tries) {
		drm_dbg_kms(&dev_priv->drm,
			    "V Prime validation failed.(%d)\n", ret);
		goto err;
	}

	drm_dbg_kms(&dev_priv->drm, "HDCP is enabled (%d downstream devices)\n",
		    num_downstream);
	ret = 0;
err:
	kfree(ksv_fifo);
	return ret;
}
 624
/* Implements Part 1 of the HDCP authorization procedure */
static int intel_hdcp_auth(struct intel_connector *connector)
{

	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
	struct intel_hdcp *hdcp = &connector->hdcp;
	const struct intel_hdcp_shim *shim = hdcp->shim;
	enum transcoder cpu_transcoder = connector->hdcp.cpu_transcoder;
	enum port port = dig_port->base.port;
	unsigned long r0_prime_gen_start;
	int ret, i, tries = 2;
	/* Unions let the same bytes be viewed as registers (HW) or a
	 * byte stream (shim/DPCD-DDC transfer). */
	union {
		u32 reg[2];
		u8 shim[DRM_HDCP_AN_LEN];
	} an;
	union {
		u32 reg[2];
		u8 shim[DRM_HDCP_KSV_LEN];
	} bksv;
	union {
		u32 reg;
		u8 shim[DRM_HDCP_RI_LEN];
	} ri;
	bool repeater_present, hdcp_capable;

	/*
	 * Detects whether the display is HDCP capable. Although we check for
	 * valid Bksv below, the HDCP over DP spec requires that we check
	 * whether the display supports HDCP before we write An. For HDMI
	 * displays, this is not necessary.
	 */
	if (shim->hdcp_capable) {
		ret = shim->hdcp_capable(dig_port, &hdcp_capable);
		if (ret)
			return ret;
		if (!hdcp_capable) {
			drm_dbg_kms(&dev_priv->drm,
				    "Panel is not HDCP capable\n");
			return -EINVAL;
		}
	}

	/* Initialize An with 2 random values and acquire it */
	for (i = 0; i < 2; i++)
		intel_de_write(dev_priv,
			       HDCP_ANINIT(dev_priv, cpu_transcoder, port),
			       get_random_u32());
	intel_de_write(dev_priv, HDCP_CONF(dev_priv, cpu_transcoder, port),
		       HDCP_CONF_CAPTURE_AN);

	/* Wait for An to be acquired */
	if (intel_de_wait_for_set(dev_priv,
				  HDCP_STATUS(dev_priv, cpu_transcoder, port),
				  HDCP_STATUS_AN_READY, 1)) {
		drm_err(&dev_priv->drm, "Timed out waiting for An\n");
		return -ETIMEDOUT;
	}

	/* Read the HW-generated An and send An || Aksv to the receiver. */
	an.reg[0] = intel_de_read(dev_priv,
				  HDCP_ANLO(dev_priv, cpu_transcoder, port));
	an.reg[1] = intel_de_read(dev_priv,
				  HDCP_ANHI(dev_priv, cpu_transcoder, port));
	ret = shim->write_an_aksv(dig_port, an.shim);
	if (ret)
		return ret;

	/* R0' becomes available a fixed time after Aksv is written. */
	r0_prime_gen_start = jiffies;

	memset(&bksv, 0, sizeof(bksv));

	ret = intel_hdcp_read_valid_bksv(dig_port, shim, bksv.shim);
	if (ret < 0)
		return ret;

	/* Abort if the receiver's KSV is on the revocation list (SRM). */
	if (drm_hdcp_check_ksvs_revoked(&dev_priv->drm, bksv.shim, 1) > 0) {
		drm_err(&dev_priv->drm, "BKSV is revoked\n");
		return -EPERM;
	}

	intel_de_write(dev_priv, HDCP_BKSVLO(dev_priv, cpu_transcoder, port),
		       bksv.reg[0]);
	intel_de_write(dev_priv, HDCP_BKSVHI(dev_priv, cpu_transcoder, port),
		       bksv.reg[1]);

	ret = shim->repeater_present(dig_port, &repeater_present);
	if (ret)
		return ret;
	if (repeater_present)
		intel_de_write(dev_priv, HDCP_REP_CTL,
			       intel_hdcp_get_repeater_ctl(dev_priv, cpu_transcoder, port));

	ret = shim->toggle_signalling(dig_port, true);
	if (ret)
		return ret;

	intel_de_write(dev_priv, HDCP_CONF(dev_priv, cpu_transcoder, port),
		       HDCP_CONF_AUTH_AND_ENC);

	/* Wait for R0 ready */
	if (wait_for(intel_de_read(dev_priv, HDCP_STATUS(dev_priv, cpu_transcoder, port)) &
		     (HDCP_STATUS_R0_READY | HDCP_STATUS_ENC), 1)) {
		drm_err(&dev_priv->drm, "Timed out waiting for R0 ready\n");
		return -ETIMEDOUT;
	}

	/*
	 * Wait for R0' to become available. The spec says 100ms from Aksv, but
	 * some monitors can take longer than this. We'll set the timeout at
	 * 300ms just to be sure.
	 *
	 * On DP, there's an R0_READY bit available but no such bit
	 * exists on HDMI. Since the upper-bound is the same, we'll just do
	 * the stupid thing instead of polling on one and not the other.
	 */
	wait_remaining_ms_from_jiffies(r0_prime_gen_start, 300);

	tries = 3;

	/*
	 * DP HDCP Spec mandates the two more reattempt to read R0, incase
	 * of R0 mismatch.
	 */
	for (i = 0; i < tries; i++) {
		ri.reg = 0;
		ret = shim->read_ri_prime(dig_port, ri.shim);
		if (ret)
			return ret;
		intel_de_write(dev_priv,
			       HDCP_RPRIME(dev_priv, cpu_transcoder, port),
			       ri.reg);

		/* Wait for Ri prime match */
		if (!wait_for(intel_de_read(dev_priv, HDCP_STATUS(dev_priv, cpu_transcoder, port)) &
			      (HDCP_STATUS_RI_MATCH | HDCP_STATUS_ENC), 1))
			break;
	}

	if (i == tries) {
		drm_dbg_kms(&dev_priv->drm,
			    "Timed out waiting for Ri prime match (%x)\n",
			    intel_de_read(dev_priv, HDCP_STATUS(dev_priv,
					  cpu_transcoder, port)));
		return -ETIMEDOUT;
	}

	/* Wait for encryption confirmation */
	if (intel_de_wait_for_set(dev_priv,
				  HDCP_STATUS(dev_priv, cpu_transcoder, port),
				  HDCP_STATUS_ENC,
				  ENCRYPT_STATUS_CHANGE_TIMEOUT_MS)) {
		drm_err(&dev_priv->drm, "Timed out waiting for encryption\n");
		return -ETIMEDOUT;
	}

	/*
	 * XXX: If we have MST-connected devices, we need to enable encryption
	 * on those as well.
	 */

	/* Repeaters additionally require Part 2 (downstream auth). */
	if (repeater_present)
		return intel_hdcp_auth_downstream(connector);

	drm_dbg_kms(&dev_priv->drm, "HDCP is enabled (no repeater present)\n");
	return 0;
}
 790
/*
 * Disable HDCP 1.4 encryption on the connector's transcoder/port and
 * stop HDCP signalling via the shim.
 *
 * Returns 0 on success, -ETIMEDOUT if the HW status never clears, or
 * the shim's error from toggling signalling off.
 */
static int _intel_hdcp_disable(struct intel_connector *connector)
{

	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
	struct intel_hdcp *hdcp = &connector->hdcp;
	enum port port = dig_port->base.port;
	enum transcoder cpu_transcoder = hdcp->cpu_transcoder;
	u32 repeater_ctl;
	int ret;

	drm_dbg_kms(&dev_priv->drm, "[%s:%d] HDCP is being disabled...\n",
		    connector->base.name, connector->base.base.id);

	/* Mark SW state disabled before touching HW. */
	hdcp->hdcp_encrypted = false;
	intel_de_write(dev_priv, HDCP_CONF(dev_priv, cpu_transcoder, port), 0);
	/* Wait for every HDCP status bit to drop. */
	if (intel_de_wait_for_clear(dev_priv,
				    HDCP_STATUS(dev_priv, cpu_transcoder, port),
				    ~0, ENCRYPT_STATUS_CHANGE_TIMEOUT_MS)) {
		drm_err(&dev_priv->drm,
			"Failed to disable HDCP, timeout clearing status\n");
		return -ETIMEDOUT;
	}

	/* Clear this pipe/port's repeater bits, preserving the others'. */
	repeater_ctl = intel_hdcp_get_repeater_ctl(dev_priv, cpu_transcoder,
						   port);
	intel_de_write(dev_priv, HDCP_REP_CTL,
		       intel_de_read(dev_priv, HDCP_REP_CTL) & ~repeater_ctl);

	ret = hdcp->shim->toggle_signalling(dig_port, false);
	if (ret) {
		drm_err(&dev_priv->drm, "Failed to disable HDCP signalling\n");
		return ret;
	}

	drm_dbg_kms(&dev_priv->drm, "HDCP is disabled\n");
	return 0;
}
 828
/*
 * Enable HDCP 1.4: load the key (retrying KEY_LOAD_TRIES times) and run
 * the authentication protocol up to 3 times, as the spec expects reauth
 * on failure. Sets hdcp->hdcp_encrypted on success.
 * Called with hdcp->mutex held (see intel_hdcp_check_link).
 */
static int _intel_hdcp_enable(struct intel_connector *connector)
{
	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
	struct intel_hdcp *hdcp = &connector->hdcp;
	int i, ret, tries = 3;

	drm_dbg_kms(&dev_priv->drm, "[%s:%d] HDCP is being enabled...\n",
		    connector->base.name, connector->base.base.id);

	if (!hdcp_key_loadable(dev_priv)) {
		drm_err(&dev_priv->drm, "HDCP key Load is not possible\n");
		return -ENXIO;
	}

	/* Clear and retry the key load on failure. */
	for (i = 0; i < KEY_LOAD_TRIES; i++) {
		ret = intel_hdcp_load_keys(dev_priv);
		if (!ret)
			break;
		intel_hdcp_clear_keys(dev_priv);
	}
	if (ret) {
		drm_err(&dev_priv->drm, "Could not load HDCP keys, (%d)\n",
			ret);
		return ret;
	}

	/* Incase of authentication failures, HDCP spec expects reauth. */
	for (i = 0; i < tries; i++) {
		ret = intel_hdcp_auth(connector);
		if (!ret) {
			hdcp->hdcp_encrypted = true;
			return 0;
		}

		drm_dbg_kms(&dev_priv->drm, "HDCP Auth failure (%d)\n", ret);

		/* Ensuring HDCP encryption and signalling are stopped. */
		_intel_hdcp_disable(connector);
	}

	drm_dbg_kms(&dev_priv->drm,
		    "HDCP authentication failed (%d tries/%d)\n", tries, ret);
	return ret;
}
 873
/* Map an embedded struct intel_hdcp back to its owning connector. */
static struct intel_connector *intel_hdcp_to_connector(struct intel_hdcp *hdcp)
{
	return container_of(hdcp, struct intel_connector, hdcp);
}
 878
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
/* Implements Part 3 of the HDCP authorization procedure */
static int intel_hdcp_check_link(struct intel_connector *connector)
{

	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
	struct intel_hdcp *hdcp = &connector->hdcp;
	enum port port = dig_port->base.port;
	enum transcoder cpu_transcoder;
	int ret = 0;

	mutex_lock(&hdcp->mutex);
	cpu_transcoder = hdcp->cpu_transcoder;

	/* Check_link valid only when HDCP1.4 is enabled */
	if (hdcp->value != DRM_MODE_CONTENT_PROTECTION_ENABLED ||
	    !hdcp->hdcp_encrypted) {
		ret = -EINVAL;
		goto out;
	}

	/* HW silently dropping encryption while SW thinks it's on is a bug. */
	if (drm_WARN_ON(&dev_priv->drm,
			!intel_hdcp_in_use(dev_priv, cpu_transcoder, port))) {
		drm_err(&dev_priv->drm,
			"%s:%d HDCP link stopped encryption,%x\n",
			connector->base.name, connector->base.base.id,
			intel_de_read(dev_priv, HDCP_STATUS(dev_priv, cpu_transcoder, port)));
		ret = -ENXIO;
		hdcp->value = DRM_MODE_CONTENT_PROTECTION_DESIRED;
		schedule_work(&hdcp->prop_work);
		goto out;
	}

	/* Link still good: re-assert ENABLED (unless userspace dropped it). */
	if (hdcp->shim->check_link(dig_port)) {
		if (hdcp->value != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) {
			hdcp->value = DRM_MODE_CONTENT_PROTECTION_ENABLED;
			schedule_work(&hdcp->prop_work);
		}
		goto out;
	}

	drm_dbg_kms(&dev_priv->drm,
		    "[%s:%d] HDCP link failed, retrying authentication\n",
		    connector->base.name, connector->base.base.id);

	/* Link failed: tear down and re-run the full authentication. */
	ret = _intel_hdcp_disable(connector);
	if (ret) {
		drm_err(&dev_priv->drm, "Failed to disable hdcp (%d)\n", ret);
		hdcp->value = DRM_MODE_CONTENT_PROTECTION_DESIRED;
		schedule_work(&hdcp->prop_work);
		goto out;
	}

	ret = _intel_hdcp_enable(connector);
	if (ret) {
		drm_err(&dev_priv->drm, "Failed to enable hdcp (%d)\n", ret);
		hdcp->value = DRM_MODE_CONTENT_PROTECTION_DESIRED;
		schedule_work(&hdcp->prop_work);
		goto out;
	}

out:
	mutex_unlock(&hdcp->mutex);
	return ret;
}
 943
/*
 * Deferred worker that pushes hdcp->value out to the connector's
 * content-protection property. Runs in a workqueue because the property
 * update needs connection_mutex, which must be taken before hdcp->mutex.
 */
static void intel_hdcp_prop_work(struct work_struct *work)
{
	struct intel_hdcp *hdcp = container_of(work, struct intel_hdcp,
					       prop_work);
	struct intel_connector *connector = intel_hdcp_to_connector(hdcp);
	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);

	drm_modeset_lock(&dev_priv->drm.mode_config.connection_mutex, NULL);
	mutex_lock(&hdcp->mutex);

	/*
	 * This worker is only used to flip between ENABLED/DESIRED. Either of
	 * those to UNDESIRED is handled by core. If value == UNDESIRED,
	 * we're running just after hdcp has been disabled, so just exit
	 */
	if (hdcp->value != DRM_MODE_CONTENT_PROTECTION_UNDESIRED)
		drm_hdcp_update_content_protection(&connector->base,
						   hdcp->value);

	mutex_unlock(&hdcp->mutex);
	drm_modeset_unlock(&dev_priv->drm.mode_config.connection_mutex);
}
 966
 967bool is_hdcp_supported(struct drm_i915_private *dev_priv, enum port port)
 968{
 969	return INTEL_INFO(dev_priv)->display.has_hdcp &&
 970			(INTEL_GEN(dev_priv) >= 12 || port < PORT_E);
 971}
 972
 973static int
 974hdcp2_prepare_ake_init(struct intel_connector *connector,
 975		       struct hdcp2_ake_init *ake_data)
 976{
 977	struct hdcp_port_data *data = &connector->hdcp.port_data;
 978	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
 979	struct i915_hdcp_comp_master *comp;
 
 980	int ret;
 981
 982	mutex_lock(&dev_priv->hdcp_comp_mutex);
 983	comp = dev_priv->hdcp_master;
 984
 985	if (!comp || !comp->ops) {
 986		mutex_unlock(&dev_priv->hdcp_comp_mutex);
 987		return -EINVAL;
 988	}
 989
 990	ret = comp->ops->initiate_hdcp2_session(comp->mei_dev, data, ake_data);
 991	if (ret)
 992		drm_dbg_kms(&dev_priv->drm, "Prepare_ake_init failed. %d\n",
 993			    ret);
 994	mutex_unlock(&dev_priv->hdcp_comp_mutex);
 995
 996	return ret;
 997}
 998
/*
 * Hand the receiver certificate (AKE_Send_Cert) to the mei firmware
 * component for verification, and get back Ek_pub(km) plus pairing
 * state. Returns -EINVAL when the component is not bound, otherwise the
 * component's return value.
 */
static int
hdcp2_verify_rx_cert_prepare_km(struct intel_connector *connector,
				struct hdcp2_ake_send_cert *rx_cert,
				bool *paired,
				struct hdcp2_ake_no_stored_km *ek_pub_km,
				size_t *msg_sz)
{
	struct hdcp_port_data *data = &connector->hdcp.port_data;
	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
	struct i915_hdcp_comp_master *comp;
	int ret;

	mutex_lock(&dev_priv->hdcp_comp_mutex);
	comp = dev_priv->hdcp_master;

	if (!comp || !comp->ops) {
		mutex_unlock(&dev_priv->hdcp_comp_mutex);
		return -EINVAL;
	}

	ret = comp->ops->verify_receiver_cert_prepare_km(comp->mei_dev, data,
							 rx_cert, paired,
							 ek_pub_km, msg_sz);
	if (ret < 0)
		drm_dbg_kms(&dev_priv->drm, "Verify rx_cert failed. %d\n",
			    ret);
	mutex_unlock(&dev_priv->hdcp_comp_mutex);

	return ret;
}
1029
1030static int hdcp2_verify_hprime(struct intel_connector *connector,
1031			       struct hdcp2_ake_send_hprime *rx_hprime)
1032{
1033	struct hdcp_port_data *data = &connector->hdcp.port_data;
1034	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
1035	struct i915_hdcp_comp_master *comp;
 
1036	int ret;
1037
1038	mutex_lock(&dev_priv->hdcp_comp_mutex);
1039	comp = dev_priv->hdcp_master;
1040
1041	if (!comp || !comp->ops) {
1042		mutex_unlock(&dev_priv->hdcp_comp_mutex);
1043		return -EINVAL;
1044	}
1045
1046	ret = comp->ops->verify_hprime(comp->mei_dev, data, rx_hprime);
1047	if (ret < 0)
1048		drm_dbg_kms(&dev_priv->drm, "Verify hprime failed. %d\n", ret);
1049	mutex_unlock(&dev_priv->hdcp_comp_mutex);
1050
1051	return ret;
1052}
1053
/*
 * Forward the receiver's pairing info (AKE_Send_Pairing_Info) to the
 * mei firmware component for storage. Returns -EINVAL when the
 * component is not bound, otherwise the component's return value.
 */
static int
hdcp2_store_pairing_info(struct intel_connector *connector,
			 struct hdcp2_ake_send_pairing_info *pairing_info)
{
	struct hdcp_port_data *data = &connector->hdcp.port_data;
	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
	struct i915_hdcp_comp_master *comp;
	int ret;

	mutex_lock(&dev_priv->hdcp_comp_mutex);
	comp = dev_priv->hdcp_master;

	if (!comp || !comp->ops) {
		mutex_unlock(&dev_priv->hdcp_comp_mutex);
		return -EINVAL;
	}

	ret = comp->ops->store_pairing_info(comp->mei_dev, data, pairing_info);
	if (ret < 0)
		drm_dbg_kms(&dev_priv->drm, "Store pairing info failed. %d\n",
			    ret);
	mutex_unlock(&dev_priv->hdcp_comp_mutex);

	return ret;
}
1079
/*
 * Ask the mei firmware component to start the locality check and
 * produce the LC_Init message. Returns -EINVAL when the component is
 * not bound, otherwise the component's return value.
 */
static int
hdcp2_prepare_lc_init(struct intel_connector *connector,
		      struct hdcp2_lc_init *lc_init)
{
	struct hdcp_port_data *data = &connector->hdcp.port_data;
	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
	struct i915_hdcp_comp_master *comp;
	int ret;

	mutex_lock(&dev_priv->hdcp_comp_mutex);
	comp = dev_priv->hdcp_master;

	if (!comp || !comp->ops) {
		mutex_unlock(&dev_priv->hdcp_comp_mutex);
		return -EINVAL;
	}

	ret = comp->ops->initiate_locality_check(comp->mei_dev, data, lc_init);
	if (ret < 0)
		drm_dbg_kms(&dev_priv->drm, "Prepare lc_init failed. %d\n",
			    ret);
	mutex_unlock(&dev_priv->hdcp_comp_mutex);

	return ret;
}
1105
/*
 * Hand the receiver's L' (LC_Send_L_prime) to the mei firmware
 * component for verification. Returns -EINVAL when the component is not
 * bound, otherwise the component's return value.
 */
static int
hdcp2_verify_lprime(struct intel_connector *connector,
		    struct hdcp2_lc_send_lprime *rx_lprime)
{
	struct hdcp_port_data *data = &connector->hdcp.port_data;
	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
	struct i915_hdcp_comp_master *comp;
	int ret;

	mutex_lock(&dev_priv->hdcp_comp_mutex);
	comp = dev_priv->hdcp_master;

	if (!comp || !comp->ops) {
		mutex_unlock(&dev_priv->hdcp_comp_mutex);
		return -EINVAL;
	}

	ret = comp->ops->verify_lprime(comp->mei_dev, data, rx_lprime);
	if (ret < 0)
		drm_dbg_kms(&dev_priv->drm, "Verify L_Prime failed. %d\n",
			    ret);
	mutex_unlock(&dev_priv->hdcp_comp_mutex);

	return ret;
}
1131
1132static int hdcp2_prepare_skey(struct intel_connector *connector,
1133			      struct hdcp2_ske_send_eks *ske_data)
1134{
1135	struct hdcp_port_data *data = &connector->hdcp.port_data;
1136	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
1137	struct i915_hdcp_comp_master *comp;
 
1138	int ret;
1139
1140	mutex_lock(&dev_priv->hdcp_comp_mutex);
1141	comp = dev_priv->hdcp_master;
1142
1143	if (!comp || !comp->ops) {
1144		mutex_unlock(&dev_priv->hdcp_comp_mutex);
1145		return -EINVAL;
1146	}
1147
1148	ret = comp->ops->get_session_key(comp->mei_dev, data, ske_data);
1149	if (ret < 0)
1150		drm_dbg_kms(&dev_priv->drm, "Get session key failed. %d\n",
1151			    ret);
1152	mutex_unlock(&dev_priv->hdcp_comp_mutex);
1153
1154	return ret;
1155}
1156
/*
 * Hand the repeater's receiver-ID list to the mei firmware component
 * for topology verification and get back the RepeaterAuth_Send_Ack
 * message. Returns -EINVAL when the component is not bound, otherwise
 * the component's return value.
 */
static int
hdcp2_verify_rep_topology_prepare_ack(struct intel_connector *connector,
				      struct hdcp2_rep_send_receiverid_list
								*rep_topology,
				      struct hdcp2_rep_send_ack *rep_send_ack)
{
	struct hdcp_port_data *data = &connector->hdcp.port_data;
	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
	struct i915_hdcp_comp_master *comp;
	int ret;

	mutex_lock(&dev_priv->hdcp_comp_mutex);
	comp = dev_priv->hdcp_master;

	if (!comp || !comp->ops) {
		mutex_unlock(&dev_priv->hdcp_comp_mutex);
		return -EINVAL;
	}

	ret = comp->ops->repeater_check_flow_prepare_ack(comp->mei_dev, data,
							 rep_topology,
							 rep_send_ack);
	if (ret < 0)
		drm_dbg_kms(&dev_priv->drm,
			    "Verify rep topology failed. %d\n", ret);
	mutex_unlock(&dev_priv->hdcp_comp_mutex);

	return ret;
}
1186
1187static int
1188hdcp2_verify_mprime(struct intel_connector *connector,
1189		    struct hdcp2_rep_stream_ready *stream_ready)
1190{
1191	struct hdcp_port_data *data = &connector->hdcp.port_data;
1192	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
1193	struct i915_hdcp_comp_master *comp;
 
1194	int ret;
1195
1196	mutex_lock(&dev_priv->hdcp_comp_mutex);
1197	comp = dev_priv->hdcp_master;
1198
1199	if (!comp || !comp->ops) {
1200		mutex_unlock(&dev_priv->hdcp_comp_mutex);
1201		return -EINVAL;
1202	}
1203
1204	ret = comp->ops->verify_mprime(comp->mei_dev, data, stream_ready);
1205	if (ret < 0)
1206		drm_dbg_kms(&dev_priv->drm, "Verify mprime failed. %d\n", ret);
1207	mutex_unlock(&dev_priv->hdcp_comp_mutex);
1208
1209	return ret;
1210}
1211
1212static int hdcp2_authenticate_port(struct intel_connector *connector)
1213{
1214	struct hdcp_port_data *data = &connector->hdcp.port_data;
1215	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
1216	struct i915_hdcp_comp_master *comp;
 
1217	int ret;
1218
1219	mutex_lock(&dev_priv->hdcp_comp_mutex);
1220	comp = dev_priv->hdcp_master;
1221
1222	if (!comp || !comp->ops) {
1223		mutex_unlock(&dev_priv->hdcp_comp_mutex);
1224		return -EINVAL;
1225	}
1226
1227	ret = comp->ops->enable_hdcp_authentication(comp->mei_dev, data);
1228	if (ret < 0)
1229		drm_dbg_kms(&dev_priv->drm, "Enable hdcp auth failed. %d\n",
1230			    ret);
1231	mutex_unlock(&dev_priv->hdcp_comp_mutex);
1232
1233	return ret;
1234}
1235
1236static int hdcp2_close_mei_session(struct intel_connector *connector)
1237{
1238	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
1239	struct i915_hdcp_comp_master *comp;
 
1240	int ret;
1241
1242	mutex_lock(&dev_priv->hdcp_comp_mutex);
1243	comp = dev_priv->hdcp_master;
1244
1245	if (!comp || !comp->ops) {
1246		mutex_unlock(&dev_priv->hdcp_comp_mutex);
1247		return -EINVAL;
1248	}
1249
1250	ret = comp->ops->close_hdcp_session(comp->mei_dev,
1251					     &connector->hdcp.port_data);
1252	mutex_unlock(&dev_priv->hdcp_comp_mutex);
1253
1254	return ret;
1255}
1256
/*
 * Drop the authenticated state kept by the ME FW for this port. Closing
 * the mei hdcp session is all that is required.
 */
static int hdcp2_deauthenticate_port(struct intel_connector *connector)
{
	return hdcp2_close_mei_session(connector);
}
1261
/*
 * Authentication flow starts from here.
 *
 * Runs the HDCP2.2 AKE (Authentication and Key Exchange) stage with the
 * sink: AKE_Init, cert verification via the ME FW, km transmission,
 * H' verification, and (first contact only) pairing-info storage.
 * Returns 0 on success or a negative error code.
 */
static int hdcp2_authentication_key_exchange(struct intel_connector *connector)
{
	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
	struct intel_hdcp *hdcp = &connector->hdcp;
	union {
		struct hdcp2_ake_init ake_init;
		struct hdcp2_ake_send_cert send_cert;
		struct hdcp2_ake_no_stored_km no_stored_km;
		struct hdcp2_ake_send_hprime send_hprime;
		struct hdcp2_ake_send_pairing_info pairing_info;
	} msgs;
	const struct intel_hdcp_shim *shim = hdcp->shim;
	size_t size;
	int ret;

	/* Init for seq_num: fresh session starts both counters at zero. */
	hdcp->seq_num_v = 0;
	hdcp->seq_num_m = 0;

	/* ME FW generates r_tx etc. for the AKE_Init message. */
	ret = hdcp2_prepare_ake_init(connector, &msgs.ake_init);
	if (ret < 0)
		return ret;

	ret = shim->write_2_2_msg(dig_port, &msgs.ake_init,
				  sizeof(msgs.ake_init));
	if (ret < 0)
		return ret;

	/* Sink replies with its certificate (AKE_Send_Cert). */
	ret = shim->read_2_2_msg(dig_port, HDCP_2_2_AKE_SEND_CERT,
				 &msgs.send_cert, sizeof(msgs.send_cert));
	if (ret < 0)
		return ret;

	/* rx_caps[0] must advertise the HDCP2.2 version. */
	if (msgs.send_cert.rx_caps[0] != HDCP_2_2_RX_CAPS_VERSION_VAL) {
		drm_dbg_kms(&dev_priv->drm, "cert.rx_caps dont claim HDCP2.2\n");
		return -EINVAL;
	}

	/* Remember whether the sink is a repeater for the later stages. */
	hdcp->is_repeater = HDCP_2_2_RX_REPEATER(msgs.send_cert.rx_caps[2]);

	/* Reject receivers present on the SRM revocation list. */
	if (drm_hdcp_check_ksvs_revoked(&dev_priv->drm,
					msgs.send_cert.cert_rx.receiver_id,
					1) > 0) {
		drm_err(&dev_priv->drm, "Receiver ID is revoked\n");
		return -EPERM;
	}

	/*
	 * Here msgs.no_stored_km will hold msgs corresponding to the km
	 * stored also.
	 */
	ret = hdcp2_verify_rx_cert_prepare_km(connector, &msgs.send_cert,
					      &hdcp->is_paired,
					      &msgs.no_stored_km, &size);
	if (ret < 0)
		return ret;

	/* Send km (stored or no-stored variant; size tells which). */
	ret = shim->write_2_2_msg(dig_port, &msgs.no_stored_km, size);
	if (ret < 0)
		return ret;

	/* Sink proves knowledge of km by returning H'. */
	ret = shim->read_2_2_msg(dig_port, HDCP_2_2_AKE_SEND_HPRIME,
				 &msgs.send_hprime, sizeof(msgs.send_hprime));
	if (ret < 0)
		return ret;

	ret = hdcp2_verify_hprime(connector, &msgs.send_hprime);
	if (ret < 0)
		return ret;

	if (!hdcp->is_paired) {
		/* Pairing is required */
		ret = shim->read_2_2_msg(dig_port,
					 HDCP_2_2_AKE_SEND_PAIRING_INFO,
					 &msgs.pairing_info,
					 sizeof(msgs.pairing_info));
		if (ret < 0)
			return ret;

		/* Persist E(kh, km) so future sessions can skip pairing. */
		ret = hdcp2_store_pairing_info(connector, &msgs.pairing_info);
		if (ret < 0)
			return ret;
		hdcp->is_paired = true;
	}

	return 0;
}
1351
1352static int hdcp2_locality_check(struct intel_connector *connector)
1353{
1354	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
1355	struct intel_hdcp *hdcp = &connector->hdcp;
1356	union {
1357		struct hdcp2_lc_init lc_init;
1358		struct hdcp2_lc_send_lprime send_lprime;
1359	} msgs;
1360	const struct intel_hdcp_shim *shim = hdcp->shim;
1361	int tries = HDCP2_LC_RETRY_CNT, ret, i;
1362
1363	for (i = 0; i < tries; i++) {
1364		ret = hdcp2_prepare_lc_init(connector, &msgs.lc_init);
1365		if (ret < 0)
1366			continue;
1367
1368		ret = shim->write_2_2_msg(dig_port, &msgs.lc_init,
1369				      sizeof(msgs.lc_init));
1370		if (ret < 0)
1371			continue;
1372
1373		ret = shim->read_2_2_msg(dig_port,
1374					 HDCP_2_2_LC_SEND_LPRIME,
1375					 &msgs.send_lprime,
1376					 sizeof(msgs.send_lprime));
1377		if (ret < 0)
1378			continue;
1379
1380		ret = hdcp2_verify_lprime(connector, &msgs.send_lprime);
1381		if (!ret)
1382			break;
1383	}
1384
1385	return ret;
1386}
1387
1388static int hdcp2_session_key_exchange(struct intel_connector *connector)
1389{
1390	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
1391	struct intel_hdcp *hdcp = &connector->hdcp;
1392	struct hdcp2_ske_send_eks send_eks;
1393	int ret;
1394
1395	ret = hdcp2_prepare_skey(connector, &send_eks);
1396	if (ret < 0)
1397		return ret;
1398
1399	ret = hdcp->shim->write_2_2_msg(dig_port, &send_eks,
1400					sizeof(send_eks));
1401	if (ret < 0)
1402		return ret;
1403
1404	return 0;
1405}
1406
1407static
1408int hdcp2_propagate_stream_management_info(struct intel_connector *connector)
1409{
1410	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
1411	struct drm_i915_private *i915 = to_i915(connector->base.dev);
1412	struct intel_hdcp *hdcp = &connector->hdcp;
1413	union {
1414		struct hdcp2_rep_stream_manage stream_manage;
1415		struct hdcp2_rep_stream_ready stream_ready;
1416	} msgs;
1417	const struct intel_hdcp_shim *shim = hdcp->shim;
1418	int ret;
 
 
 
1419
1420	/* Prepare RepeaterAuth_Stream_Manage msg */
1421	msgs.stream_manage.msg_id = HDCP_2_2_REP_STREAM_MANAGE;
1422	drm_hdcp_cpu_to_be24(msgs.stream_manage.seq_num_m, hdcp->seq_num_m);
1423
1424	/* K no of streams is fixed as 1. Stored as big-endian. */
1425	msgs.stream_manage.k = cpu_to_be16(1);
1426
1427	/* For HDMI this is forced to be 0x0. For DP SST also this is 0x0. */
1428	msgs.stream_manage.streams[0].stream_id = 0;
1429	msgs.stream_manage.streams[0].stream_type = hdcp->content_type;
 
1430
 
 
1431	/* Send it to Repeater */
1432	ret = shim->write_2_2_msg(dig_port, &msgs.stream_manage,
1433				  sizeof(msgs.stream_manage));
1434	if (ret < 0)
1435		return ret;
1436
1437	ret = shim->read_2_2_msg(dig_port, HDCP_2_2_REP_STREAM_READY,
1438				 &msgs.stream_ready, sizeof(msgs.stream_ready));
1439	if (ret < 0)
1440		return ret;
1441
1442	hdcp->port_data.seq_num_m = hdcp->seq_num_m;
1443	hdcp->port_data.streams[0].stream_type = hdcp->content_type;
1444
1445	ret = hdcp2_verify_mprime(connector, &msgs.stream_ready);
1446	if (ret < 0)
1447		return ret;
1448
 
1449	hdcp->seq_num_m++;
1450
1451	if (hdcp->seq_num_m > HDCP_2_2_SEQ_NUM_MAX) {
1452		drm_dbg_kms(&i915->drm, "seq_num_m roll over.\n");
1453		return -1;
1454	}
1455
1456	return 0;
1457}
1458
/*
 * Reads the repeater's ReceiverID list, validates topology limits,
 * seq_num_v monotonicity and SRM revocation status, then lets the ME FW
 * verify V' and sends the resulting RepeaterAuth_Send_Ack downstream.
 * Returns 0 on success or a negative error code.
 */
static
int hdcp2_authenticate_repeater_topology(struct intel_connector *connector)
{
	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
	struct intel_hdcp *hdcp = &connector->hdcp;
	union {
		struct hdcp2_rep_send_receiverid_list recvid_list;
		struct hdcp2_rep_send_ack rep_ack;
	} msgs;
	const struct intel_hdcp_shim *shim = hdcp->shim;
	u32 seq_num_v, device_cnt;
	u8 *rx_info;
	int ret;

	ret = shim->read_2_2_msg(dig_port, HDCP_2_2_REP_SEND_RECVID_LIST,
				 &msgs.recvid_list, sizeof(msgs.recvid_list));
	if (ret < 0)
		return ret;

	rx_info = msgs.recvid_list.rx_info;

	/* Spec limits on cascade depth and downstream device count. */
	if (HDCP_2_2_MAX_CASCADE_EXCEEDED(rx_info[1]) ||
	    HDCP_2_2_MAX_DEVS_EXCEEDED(rx_info[1])) {
		drm_dbg_kms(&dev_priv->drm, "Topology Max Size Exceeded\n");
		return -EINVAL;
	}

	/* Converting and Storing the seq_num_v to local variable as DWORD */
	seq_num_v =
		drm_hdcp_be24_to_cpu((const u8 *)msgs.recvid_list.seq_num_v);

	/* Before encryption starts, the very first list must carry 0. */
	if (!hdcp->hdcp2_encrypted && seq_num_v) {
		drm_dbg_kms(&dev_priv->drm,
			    "Non zero Seq_num_v at first RecvId_List msg\n");
		return -EINVAL;
	}

	if (seq_num_v < hdcp->seq_num_v) {
		/* Roll over of the seq_num_v from repeater. Reauthenticate. */
		drm_dbg_kms(&dev_priv->drm, "Seq_num_v roll over.\n");
		return -EINVAL;
	}

	/* Device count is split across the high/low nibbles of rx_info. */
	device_cnt = (HDCP_2_2_DEV_COUNT_HI(rx_info[0]) << 4 |
		      HDCP_2_2_DEV_COUNT_LO(rx_info[1]));
	if (drm_hdcp_check_ksvs_revoked(&dev_priv->drm,
					msgs.recvid_list.receiver_ids,
					device_cnt) > 0) {
		drm_err(&dev_priv->drm, "Revoked receiver ID(s) is in list\n");
		return -EPERM;
	}

	/* ME FW verifies V' and prepares the ack for the repeater. */
	ret = hdcp2_verify_rep_topology_prepare_ack(connector,
						    &msgs.recvid_list,
						    &msgs.rep_ack);
	if (ret < 0)
		return ret;

	/* Only commit the new seq_num_v once the list has been verified. */
	hdcp->seq_num_v = seq_num_v;
	ret = shim->write_2_2_msg(dig_port, &msgs.rep_ack,
				  sizeof(msgs.rep_ack));
	if (ret < 0)
		return ret;

	return 0;
}
1526
/*
 * Full repeater authentication: topology (ReceiverID list / V') first,
 * then stream management (seq_num_m / M').
 */
static int hdcp2_authenticate_repeater(struct intel_connector *connector)
{
	int ret = hdcp2_authenticate_repeater_topology(connector);

	if (ret < 0)
		return ret;

	return hdcp2_propagate_stream_management_info(connector);
}
1537
/*
 * Runs the complete HDCP2.2 authentication sequence against the sink:
 * AKE -> Locality Check -> SKE -> (optional) stream type config ->
 * (repeater only) repeater auth -> FW port authentication.
 * The order is mandated by the protocol; returns 0 or a negative error.
 */
static int hdcp2_authenticate_sink(struct intel_connector *connector)
{
	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
	struct drm_i915_private *i915 = to_i915(connector->base.dev);
	struct intel_hdcp *hdcp = &connector->hdcp;
	const struct intel_hdcp_shim *shim = hdcp->shim;
	int ret;

	ret = hdcp2_authentication_key_exchange(connector);
	if (ret < 0) {
		drm_dbg_kms(&i915->drm, "AKE Failed. Err : %d\n", ret);
		return ret;
	}

	ret = hdcp2_locality_check(connector);
	if (ret < 0) {
		drm_dbg_kms(&i915->drm,
			    "Locality Check failed. Err : %d\n", ret);
		return ret;
	}

	ret = hdcp2_session_key_exchange(connector);
	if (ret < 0) {
		drm_dbg_kms(&i915->drm, "SKE Failed. Err : %d\n", ret);
		return ret;
	}

	/* Protocol-specific (e.g. HDMI) stream type signalling, if any. */
	if (shim->config_stream_type) {
		ret = shim->config_stream_type(dig_port,
					       hdcp->is_repeater,
					       hdcp->content_type);
		if (ret < 0)
			return ret;
	}

	/* is_repeater was learned from rx_caps during AKE. */
	if (hdcp->is_repeater) {
		ret = hdcp2_authenticate_repeater(connector);
		if (ret < 0) {
			drm_dbg_kms(&i915->drm,
				    "Repeater Auth Failed. Err: %d\n", ret);
			return ret;
		}
	}

	/* Finally let the ME FW mark the port as authenticated. */
	hdcp->port_data.streams[0].stream_type = hdcp->content_type;
	ret = hdcp2_authenticate_port(connector);
	if (ret < 0)
		return ret;

	return ret;
}
1589
/*
 * Turns on HDCP2.2 link encryption in hardware after authentication:
 * optionally toggles sink signalling, requests encryption via HDCP2_CTL
 * once LINK_AUTH_STATUS is set, then waits for LINK_ENCRYPTION_STATUS.
 */
static int hdcp2_enable_encryption(struct intel_connector *connector)
{
	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
	struct intel_hdcp *hdcp = &connector->hdcp;
	enum port port = dig_port->base.port;
	enum transcoder cpu_transcoder = hdcp->cpu_transcoder;
	int ret;

	/* Encryption must not already be live when we get here. */
	drm_WARN_ON(&dev_priv->drm,
		    intel_de_read(dev_priv, HDCP2_STATUS(dev_priv, cpu_transcoder, port)) &
		    LINK_ENCRYPTION_STATUS);
	if (hdcp->shim->toggle_signalling) {
		ret = hdcp->shim->toggle_signalling(dig_port, true);
		if (ret) {
			drm_err(&dev_priv->drm,
				"Failed to enable HDCP signalling. %d\n",
				ret);
			return ret;
		}
	}

	if (intel_de_read(dev_priv, HDCP2_STATUS(dev_priv, cpu_transcoder, port)) &
	    LINK_AUTH_STATUS) {
		/* Link is Authenticated. Now set for Encryption */
		intel_de_write(dev_priv,
			       HDCP2_CTL(dev_priv, cpu_transcoder, port),
			       intel_de_read(dev_priv, HDCP2_CTL(dev_priv, cpu_transcoder, port)) | CTL_LINK_ENCRYPTION_REQ);
	}

	/* Hardware reports encryption up through the status register. */
	ret = intel_de_wait_for_set(dev_priv,
				    HDCP2_STATUS(dev_priv, cpu_transcoder,
						 port),
				    LINK_ENCRYPTION_STATUS,
				    ENCRYPT_STATUS_CHANGE_TIMEOUT_MS);

	return ret;
}
1628
/*
 * Turns off HDCP2.2 link encryption: clears the encryption request bit,
 * waits for LINK_ENCRYPTION_STATUS to drop, then disables sink
 * signalling if the shim provides a hook.
 */
static int hdcp2_disable_encryption(struct intel_connector *connector)
{
	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
	struct intel_hdcp *hdcp = &connector->hdcp;
	enum port port = dig_port->base.port;
	enum transcoder cpu_transcoder = hdcp->cpu_transcoder;
	int ret;

	/* Encryption is expected to be live when we get here. */
	drm_WARN_ON(&dev_priv->drm, !(intel_de_read(dev_priv, HDCP2_STATUS(dev_priv, cpu_transcoder, port)) &
				      LINK_ENCRYPTION_STATUS));

	intel_de_write(dev_priv, HDCP2_CTL(dev_priv, cpu_transcoder, port),
		       intel_de_read(dev_priv, HDCP2_CTL(dev_priv, cpu_transcoder, port)) & ~CTL_LINK_ENCRYPTION_REQ);

	ret = intel_de_wait_for_clear(dev_priv,
				      HDCP2_STATUS(dev_priv, cpu_transcoder,
						   port),
				      LINK_ENCRYPTION_STATUS,
				      ENCRYPT_STATUS_CHANGE_TIMEOUT_MS);
	if (ret == -ETIMEDOUT)
		drm_dbg_kms(&dev_priv->drm, "Disable Encryption Timedout");

	if (hdcp->shim->toggle_signalling) {
		ret = hdcp->shim->toggle_signalling(dig_port, false);
		if (ret) {
			drm_err(&dev_priv->drm,
				"Failed to disable HDCP signalling. %d\n",
				ret);
			return ret;
		}
	}

	return ret;
}
1664
/*
 * Attempts HDCP2.2 sink authentication up to 3 times, deauthenticating
 * the port between failed attempts, then enables link encryption after
 * the spec-mandated 200ms pause. Returns 0 or the last error seen.
 */
static int hdcp2_authenticate_and_encrypt(struct intel_connector *connector)
{
	struct drm_i915_private *i915 = to_i915(connector->base.dev);
	int ret, i, tries = 3;

	for (i = 0; i < tries; i++) {
		ret = hdcp2_authenticate_sink(connector);
		if (!ret)
			break;

		/* Clearing the mei hdcp session */
		drm_dbg_kms(&i915->drm, "HDCP2.2 Auth %d of %d Failed.(%d)\n",
			    i + 1, tries, ret);
		if (hdcp2_deauthenticate_port(connector) < 0)
			drm_dbg_kms(&i915->drm, "Port deauth failed.\n");
	}

	/* i != tries means the loop broke out on a successful attempt. */
	if (i != tries) {
		/*
		 * Ensuring the required 200mSec min time interval between
		 * Session Key Exchange and encryption.
		 */
		msleep(HDCP_2_2_DELAY_BEFORE_ENCRYPTION_EN);
		ret = hdcp2_enable_encryption(connector);
		if (ret < 0) {
			drm_dbg_kms(&i915->drm,
				    "Encryption Enable Failed.(%d)\n", ret);
			if (hdcp2_deauthenticate_port(connector) < 0)
				drm_dbg_kms(&i915->drm, "Port deauth failed.\n");
		}
	}

	return ret;
}
1699
1700static int _intel_hdcp2_enable(struct intel_connector *connector)
 
1701{
1702	struct drm_i915_private *i915 = to_i915(connector->base.dev);
1703	struct intel_hdcp *hdcp = &connector->hdcp;
1704	int ret;
1705
1706	drm_dbg_kms(&i915->drm, "[%s:%d] HDCP2.2 is being enabled. Type: %d\n",
1707		    connector->base.name, connector->base.base.id,
1708		    hdcp->content_type);
1709
1710	ret = hdcp2_authenticate_and_encrypt(connector);
 
 
1711	if (ret) {
1712		drm_dbg_kms(&i915->drm, "HDCP2 Type%d  Enabling Failed. (%d)\n",
1713			    hdcp->content_type, ret);
1714		return ret;
1715	}
1716
1717	drm_dbg_kms(&i915->drm, "[%s:%d] HDCP2.2 is enabled. Type %d\n",
1718		    connector->base.name, connector->base.base.id,
1719		    hdcp->content_type);
1720
1721	hdcp->hdcp2_encrypted = true;
1722	return 0;
1723}
1724
1725static int _intel_hdcp2_disable(struct intel_connector *connector)
 
1726{
1727	struct drm_i915_private *i915 = to_i915(connector->base.dev);
 
 
 
1728	int ret;
1729
1730	drm_dbg_kms(&i915->drm, "[%s:%d] HDCP2.2 is being Disabled\n",
1731		    connector->base.name, connector->base.base.id);
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1732
1733	ret = hdcp2_disable_encryption(connector);
1734
1735	if (hdcp2_deauthenticate_port(connector) < 0)
1736		drm_dbg_kms(&i915->drm, "Port deauth failed.\n");
1737
1738	connector->hdcp.hdcp2_encrypted = false;
 
 
1739
1740	return ret;
1741}
1742
/*
 * Implements the Link Integrity Check for HDCP2.2.
 *
 * Called periodically from the check worker while HDCP2.2 is enabled.
 * Verifies the hardware still reports encryption, asks the shim for the
 * link state, and on topology change or link failure re-authenticates
 * (full disable + enable). Updates the content-protection property via
 * prop_work on any state change. Returns 0 when the link is healthy.
 */
static int intel_hdcp2_check_link(struct intel_connector *connector)
{
	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
	struct intel_hdcp *hdcp = &connector->hdcp;
	enum port port = dig_port->base.port;
	enum transcoder cpu_transcoder;
	int ret = 0;

	mutex_lock(&hdcp->mutex);
	cpu_transcoder = hdcp->cpu_transcoder;

	/* hdcp2_check_link is expected only when HDCP2.2 is Enabled */
	if (hdcp->value != DRM_MODE_CONTENT_PROTECTION_ENABLED ||
	    !hdcp->hdcp2_encrypted) {
		ret = -EINVAL;
		goto out;
	}

	/* Hardware dropping encryption on its own is unexpected. */
	if (drm_WARN_ON(&dev_priv->drm,
			!intel_hdcp2_in_use(dev_priv, cpu_transcoder, port))) {
		drm_err(&dev_priv->drm,
			"HDCP2.2 link stopped the encryption, %x\n",
			intel_de_read(dev_priv, HDCP2_STATUS(dev_priv, cpu_transcoder, port)));
		ret = -ENXIO;
		hdcp->value = DRM_MODE_CONTENT_PROTECTION_DESIRED;
		schedule_work(&hdcp->prop_work);
		goto out;
	}

	ret = hdcp->shim->check_2_2_link(dig_port);
	if (ret == HDCP_LINK_PROTECTED) {
		if (hdcp->value != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) {
			hdcp->value = DRM_MODE_CONTENT_PROTECTION_ENABLED;
			schedule_work(&hdcp->prop_work);
		}
		goto out;
	}

	if (ret == HDCP_TOPOLOGY_CHANGE) {
		if (hdcp->value == DRM_MODE_CONTENT_PROTECTION_UNDESIRED)
			goto out;

		drm_dbg_kms(&dev_priv->drm,
			    "HDCP2.2 Downstream topology change\n");
		/* Re-run only the repeater stage before giving up. */
		ret = hdcp2_authenticate_repeater_topology(connector);
		if (!ret) {
			hdcp->value = DRM_MODE_CONTENT_PROTECTION_ENABLED;
			schedule_work(&hdcp->prop_work);
			goto out;
		}
		drm_dbg_kms(&dev_priv->drm,
			    "[%s:%d] Repeater topology auth failed.(%d)\n",
			    connector->base.name, connector->base.base.id,
			    ret);
	} else {
		drm_dbg_kms(&dev_priv->drm,
			    "[%s:%d] HDCP2.2 link failed, retrying auth\n",
			    connector->base.name, connector->base.base.id);
	}

	/* Full teardown and re-enable as last resort. */
	ret = _intel_hdcp2_disable(connector);
	if (ret) {
		drm_err(&dev_priv->drm,
			"[%s:%d] Failed to disable hdcp2.2 (%d)\n",
			connector->base.name, connector->base.base.id, ret);
		hdcp->value = DRM_MODE_CONTENT_PROTECTION_DESIRED;
		schedule_work(&hdcp->prop_work);
		goto out;
	}

	ret = _intel_hdcp2_enable(connector);
	if (ret) {
		drm_dbg_kms(&dev_priv->drm,
			    "[%s:%d] Failed to enable hdcp2.2 (%d)\n",
			    connector->base.name, connector->base.base.id,
			    ret);
		hdcp->value = DRM_MODE_CONTENT_PROTECTION_DESIRED;
		schedule_work(&hdcp->prop_work);
		goto out;
	}

out:
	mutex_unlock(&hdcp->mutex);
	return ret;
}
1830
1831static void intel_hdcp_check_work(struct work_struct *work)
1832{
1833	struct intel_hdcp *hdcp = container_of(to_delayed_work(work),
1834					       struct intel_hdcp,
1835					       check_work);
1836	struct intel_connector *connector = intel_hdcp_to_connector(hdcp);
 
 
 
 
 
1837
1838	if (!intel_hdcp2_check_link(connector))
1839		schedule_delayed_work(&hdcp->check_work,
1840				      DRM_HDCP2_CHECK_PERIOD_MS);
1841	else if (!intel_hdcp_check_link(connector))
1842		schedule_delayed_work(&hdcp->check_work,
1843				      DRM_HDCP_CHECK_PERIOD_MS);
1844}
1845
1846static int i915_hdcp_component_bind(struct device *i915_kdev,
1847				    struct device *mei_kdev, void *data)
1848{
1849	struct drm_i915_private *dev_priv = kdev_to_i915(i915_kdev);
1850
1851	drm_dbg(&dev_priv->drm, "I915 HDCP comp bind\n");
1852	mutex_lock(&dev_priv->hdcp_comp_mutex);
1853	dev_priv->hdcp_master = (struct i915_hdcp_comp_master *)data;
1854	dev_priv->hdcp_master->mei_dev = mei_kdev;
1855	mutex_unlock(&dev_priv->hdcp_comp_mutex);
1856
1857	return 0;
1858}
1859
/*
 * Component framework unbind callback: drops the mei hdcp master under
 * the comp mutex so in-flight FW calls cannot race with the teardown.
 */
static void i915_hdcp_component_unbind(struct device *i915_kdev,
				       struct device *mei_kdev, void *data)
{
	struct drm_i915_private *dev_priv = kdev_to_i915(i915_kdev);

	drm_dbg(&dev_priv->drm, "I915 HDCP comp unbind\n");
	mutex_lock(&dev_priv->hdcp_comp_mutex);
	dev_priv->hdcp_master = NULL;
	mutex_unlock(&dev_priv->hdcp_comp_mutex);
}
1870
/* Bind/unbind hooks registered with the component framework. */
static const struct component_ops i915_hdcp_component_ops = {
	.bind   = i915_hdcp_component_bind,
	.unbind = i915_hdcp_component_unbind,
};
1875
1876static enum mei_fw_ddi intel_get_mei_fw_ddi_index(enum port port)
1877{
1878	switch (port) {
1879	case PORT_A:
1880		return MEI_DDI_A;
1881	case PORT_B ... PORT_F:
1882		return (enum mei_fw_ddi)port;
1883	default:
1884		return MEI_DDI_INVALID_PORT;
1885	}
1886}
1887
1888static enum mei_fw_tc intel_get_mei_fw_tc(enum transcoder cpu_transcoder)
1889{
1890	switch (cpu_transcoder) {
1891	case TRANSCODER_A ... TRANSCODER_D:
1892		return (enum mei_fw_tc)(cpu_transcoder | 0x10);
1893	default: /* eDP, DSI TRANSCODERS are non HDCP capable */
1894		return MEI_INVALID_TRANSCODER;
1895	}
1896}
1897
/*
 * Fills in the hdcp_port_data passed to the ME FW for this connector:
 * fw_ddi/fw_tc indices, port type, protocol, and the (k == 1) stream
 * table. The stream array is allocated once and reused on later calls.
 * Returns 0 or -ENOMEM.
 */
static int initialize_hdcp_port_data(struct intel_connector *connector,
				     const struct intel_hdcp_shim *shim)
{
	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
	struct intel_hdcp *hdcp = &connector->hdcp;
	struct hdcp_port_data *data = &hdcp->port_data;

	if (INTEL_GEN(dev_priv) < 12)
		data->fw_ddi =
			intel_get_mei_fw_ddi_index(intel_attached_encoder(connector)->port);
	else
		/*
		 * As per ME FW API expectation, for GEN 12+, fw_ddi is filled
		 * with zero(INVALID PORT index).
		 */
		data->fw_ddi = MEI_DDI_INVALID_PORT;

	/*
	 * As associated transcoder is set and modified at modeset, here fw_tc
	 * is initialized to zero (invalid transcoder index). This will be
	 * retained for <Gen12 forever.
	 */
	data->fw_tc = MEI_INVALID_TRANSCODER;

	data->port_type = (u8)HDCP_PORT_TYPE_INTEGRATED;
	data->protocol = (u8)shim->protocol;

	/* Single stream per port; allocate the table only on first init. */
	data->k = 1;
	if (!data->streams)
		data->streams = kcalloc(data->k,
					sizeof(struct hdcp2_streamid_type),
					GFP_KERNEL);
	if (!data->streams) {
		drm_err(&dev_priv->drm, "Out of Memory\n");
		return -ENOMEM;
	}

	data->streams[0].stream_id = 0;
	data->streams[0].stream_type = hdcp->content_type;

	return 0;
}
1940
1941static bool is_hdcp2_supported(struct drm_i915_private *dev_priv)
1942{
 
 
 
 
 
1943	if (!IS_ENABLED(CONFIG_INTEL_MEI_HDCP))
1944		return false;
1945
1946	return (INTEL_GEN(dev_priv) >= 10 ||
1947		IS_GEMINILAKE(dev_priv) ||
1948		IS_KABYLAKE(dev_priv) ||
1949		IS_COFFEELAKE(dev_priv) ||
1950		IS_COMETLAKE(dev_priv));
1951}
1952
/*
 * Registers the i915 side of the mei hdcp component pair. hdcp_comp_added
 * is flipped before component_add_typed() so a racing bind sees it set,
 * and rolled back under the mutex if registration fails.
 */
void intel_hdcp_component_init(struct drm_i915_private *dev_priv)
{
	int ret;

	if (!is_hdcp2_supported(dev_priv))
		return;

	mutex_lock(&dev_priv->hdcp_comp_mutex);
	drm_WARN_ON(&dev_priv->drm, dev_priv->hdcp_comp_added);

	dev_priv->hdcp_comp_added = true;
	mutex_unlock(&dev_priv->hdcp_comp_mutex);
	ret = component_add_typed(dev_priv->drm.dev, &i915_hdcp_component_ops,
				  I915_COMPONENT_HDCP);
	if (ret < 0) {
		drm_dbg_kms(&dev_priv->drm, "Failed at component add(%d)\n",
			    ret);
		mutex_lock(&dev_priv->hdcp_comp_mutex);
		dev_priv->hdcp_comp_added = false;
		mutex_unlock(&dev_priv->hdcp_comp_mutex);
		return;
	}
}
1976
1977static void intel_hdcp2_init(struct intel_connector *connector,
 
1978			     const struct intel_hdcp_shim *shim)
1979{
1980	struct drm_i915_private *i915 = to_i915(connector->base.dev);
1981	struct intel_hdcp *hdcp = &connector->hdcp;
1982	int ret;
1983
1984	ret = initialize_hdcp_port_data(connector, shim);
1985	if (ret) {
1986		drm_dbg_kms(&i915->drm, "Mei hdcp data init failed\n");
1987		return;
1988	}
1989
1990	hdcp->hdcp2_supported = true;
1991}
1992
1993int intel_hdcp_init(struct intel_connector *connector,
 
1994		    const struct intel_hdcp_shim *shim)
1995{
1996	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
1997	struct intel_hdcp *hdcp = &connector->hdcp;
1998	int ret;
1999
2000	if (!shim)
2001		return -EINVAL;
2002
2003	if (is_hdcp2_supported(dev_priv))
2004		intel_hdcp2_init(connector, shim);
2005
2006	ret =
2007	drm_connector_attach_content_protection_property(&connector->base,
2008							 hdcp->hdcp2_supported);
2009	if (ret) {
2010		hdcp->hdcp2_supported = false;
2011		kfree(hdcp->port_data.streams);
2012		return ret;
2013	}
2014
2015	hdcp->shim = shim;
2016	mutex_init(&hdcp->mutex);
2017	INIT_DELAYED_WORK(&hdcp->check_work, intel_hdcp_check_work);
2018	INIT_WORK(&hdcp->prop_work, intel_hdcp_prop_work);
2019	init_waitqueue_head(&hdcp->cp_irq_queue);
2020
2021	return 0;
2022}
2023
/*
 * Enables content protection on the connector. HDCP2.2 is tried first
 * when the setup is capable; HDCP1.4 is the fallback unless userspace
 * demanded Type1 content. On success the periodic link check is armed
 * and the property update is scheduled. Returns 0 or a negative error.
 */
int intel_hdcp_enable(struct intel_connector *connector,
		      enum transcoder cpu_transcoder, u8 content_type)
{
	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
	struct intel_hdcp *hdcp = &connector->hdcp;
	unsigned long check_link_interval = DRM_HDCP_CHECK_PERIOD_MS;
	int ret = -EINVAL;

	if (!hdcp->shim)
		return -ENOENT;

	mutex_lock(&hdcp->mutex);
	drm_WARN_ON(&dev_priv->drm,
		    hdcp->value == DRM_MODE_CONTENT_PROTECTION_ENABLED);
	hdcp->content_type = content_type;

	/* Gen12+ firmware addresses the link by transcoder, not DDI. */
	if (INTEL_GEN(dev_priv) >= 12) {
		hdcp->cpu_transcoder = cpu_transcoder;
		hdcp->port_data.fw_tc = intel_get_mei_fw_tc(cpu_transcoder);
	}

	/*
	 * Considering that HDCP2.2 is more secure than HDCP1.4, If the setup
	 * is capable of HDCP2.2, it is preferred to use HDCP2.2.
	 */
	if (intel_hdcp2_capable(connector)) {
		ret = _intel_hdcp2_enable(connector);
		if (!ret)
			check_link_interval = DRM_HDCP2_CHECK_PERIOD_MS;
	}

	/*
	 * When HDCP2.2 fails and Content Type is not Type1, HDCP1.4 will
	 * be attempted.
	 */
	if (ret && intel_hdcp_capable(connector) &&
	    hdcp->content_type != DRM_MODE_HDCP_CONTENT_TYPE1) {
		ret = _intel_hdcp_enable(connector);
	}

	if (!ret) {
		schedule_delayed_work(&hdcp->check_work, check_link_interval);
		hdcp->value = DRM_MODE_CONTENT_PROTECTION_ENABLED;
		schedule_work(&hdcp->prop_work);
	}

	mutex_unlock(&hdcp->mutex);
	return ret;
}
2073
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
/*
 * Disables content protection on the connector, tearing down whichever
 * of HDCP2.2/1.4 is currently encrypting. The link-check worker is
 * cancelled only after dropping the mutex, since the worker itself
 * takes hdcp->mutex. Returns 0 or a negative error.
 */
int intel_hdcp_disable(struct intel_connector *connector)
{
	struct intel_hdcp *hdcp = &connector->hdcp;
	int ret = 0;

	if (!hdcp->shim)
		return -ENOENT;

	mutex_lock(&hdcp->mutex);

	if (hdcp->value != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) {
		hdcp->value = DRM_MODE_CONTENT_PROTECTION_UNDESIRED;
		if (hdcp->hdcp2_encrypted)
			ret = _intel_hdcp2_disable(connector);
		else if (hdcp->hdcp_encrypted)
			ret = _intel_hdcp_disable(connector);
	}

	mutex_unlock(&hdcp->mutex);
	cancel_delayed_work_sync(&hdcp->check_work);
	return ret;
}
2096
/*
 * Applies the connector's content-protection uapi state during a commit:
 * disables HDCP when UNDESIRED or when the requested content type
 * changed (re-enabling with the new type below), and enables it when
 * DESIRED and not already enabled.
 */
void intel_hdcp_update_pipe(struct intel_atomic_state *state,
			    struct intel_encoder *encoder,
			    const struct intel_crtc_state *crtc_state,
			    const struct drm_connector_state *conn_state)
{
	struct intel_connector *connector =
				to_intel_connector(conn_state->connector);
	struct intel_hdcp *hdcp = &connector->hdcp;
	/* Type change only matters while protection is still requested. */
	bool content_protection_type_changed =
		(conn_state->hdcp_content_type != hdcp->content_type &&
		 conn_state->content_protection !=
		 DRM_MODE_CONTENT_PROTECTION_UNDESIRED);
	bool desired_and_not_enabled = false;

	/*
	 * During the HDCP encryption session if Type change is requested,
	 * disable the HDCP and reenable it with new TYPE value.
	 */
	if (conn_state->content_protection ==
	    DRM_MODE_CONTENT_PROTECTION_UNDESIRED ||
	    content_protection_type_changed)
		intel_hdcp_disable(connector);

	/*
	 * Mark the hdcp state as DESIRED after the hdcp disable of type
	 * change procedure.
	 */
	if (content_protection_type_changed) {
		mutex_lock(&hdcp->mutex);
		hdcp->value = DRM_MODE_CONTENT_PROTECTION_DESIRED;
		schedule_work(&hdcp->prop_work);
		mutex_unlock(&hdcp->mutex);
	}

	if (conn_state->content_protection ==
	    DRM_MODE_CONTENT_PROTECTION_DESIRED) {
		mutex_lock(&hdcp->mutex);
		/* Avoid enabling hdcp, if it already ENABLED */
		desired_and_not_enabled =
			hdcp->value != DRM_MODE_CONTENT_PROTECTION_ENABLED;
		mutex_unlock(&hdcp->mutex);
	}

	if (desired_and_not_enabled || content_protection_type_changed)
		intel_hdcp_enable(connector,
				  crtc_state->cpu_transcoder,
				  (u8)conn_state->hdcp_content_type);
}
2145
/*
 * Unregisters the i915 side of the mei hdcp component. The flag is
 * cleared under the mutex first so concurrent users stop before
 * component_del() runs; no-op if the component was never added.
 */
void intel_hdcp_component_fini(struct drm_i915_private *dev_priv)
{
	mutex_lock(&dev_priv->hdcp_comp_mutex);
	if (!dev_priv->hdcp_comp_added) {
		mutex_unlock(&dev_priv->hdcp_comp_mutex);
		return;
	}

	dev_priv->hdcp_comp_added = false;
	mutex_unlock(&dev_priv->hdcp_comp_mutex);

	component_del(dev_priv->drm.dev, &i915_hdcp_component_ops);
}
2159
2160void intel_hdcp_cleanup(struct intel_connector *connector)
2161{
2162	if (!connector->hdcp.shim)
 
 
2163		return;
2164
2165	mutex_lock(&connector->hdcp.mutex);
2166	kfree(connector->hdcp.port_data.streams);
2167	mutex_unlock(&connector->hdcp.mutex);
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
2168}
2169
/*
 * Atomic-check hook for the content-protection property: keeps the uapi
 * state consistent across connector disable and modeset, and forces a
 * modeset when the protection state or content type actually changed.
 */
void intel_hdcp_atomic_check(struct drm_connector *connector,
			     struct drm_connector_state *old_state,
			     struct drm_connector_state *new_state)
{
	u64 old_cp = old_state->content_protection;
	u64 new_cp = new_state->content_protection;
	struct drm_crtc_state *crtc_state;

	if (!new_state->crtc) {
		/*
		 * If the connector is being disabled with CP enabled, mark it
		 * desired so it's re-enabled when the connector is brought back
		 */
		if (old_cp == DRM_MODE_CONTENT_PROTECTION_ENABLED)
			new_state->content_protection =
				DRM_MODE_CONTENT_PROTECTION_DESIRED;
		return;
	}

	crtc_state = drm_atomic_get_new_crtc_state(new_state->state,
						   new_state->crtc);
	/*
	 * Fix the HDCP uapi content protection state in case of modeset.
	 * FIXME: As per HDCP content protection property uapi doc, an uevent()
	 * need to be sent if there is transition from ENABLED->DESIRED.
	 */
	if (drm_atomic_crtc_needs_modeset(crtc_state) &&
	    (old_cp == DRM_MODE_CONTENT_PROTECTION_ENABLED &&
	    new_cp != DRM_MODE_CONTENT_PROTECTION_UNDESIRED))
		new_state->content_protection =
			DRM_MODE_CONTENT_PROTECTION_DESIRED;

	/*
	 * Nothing to do if the state didn't change, or HDCP was activated since
	 * the last commit. And also no change in hdcp content type.
	 */
	if (old_cp == new_cp ||
	    (old_cp == DRM_MODE_CONTENT_PROTECTION_DESIRED &&
	     new_cp == DRM_MODE_CONTENT_PROTECTION_ENABLED)) {
		if (old_state->hdcp_content_type ==
				new_state->hdcp_content_type)
			return;
	}

	/* Anything else requires the pipe to be re-programmed. */
	crtc_state->mode_changed = true;
}
2216
2217/* Handles the CP_IRQ raised from the DP HDCP sink */
2218void intel_hdcp_handle_cp_irq(struct intel_connector *connector)
2219{
2220	struct intel_hdcp *hdcp = &connector->hdcp;
 
 
2221
2222	if (!hdcp->shim)
2223		return;
2224
2225	atomic_inc(&connector->hdcp.cp_irq_count);
2226	wake_up_all(&connector->hdcp.cp_irq_queue);
2227
2228	schedule_delayed_work(&hdcp->check_work, 0);
2229}