   1// SPDX-License-Identifier: GPL-2.0-only
   2/*
   3 * Copyright (c) 2014-2018, The Linux Foundation. All rights reserved.
   4 * Copyright (C) 2013 Red Hat
   5 * Author: Rob Clark <robdclark@gmail.com>
   6 */
   7
   8#define pr_fmt(fmt)	"[drm:%s:%d] " fmt, __func__, __LINE__
   9#include <linux/debugfs.h>
  10#include <linux/kthread.h>
  11#include <linux/seq_file.h>
  12
  13#include <drm/drm_crtc.h>
  14#include <drm/drm_file.h>
  15#include <drm/drm_probe_helper.h>
  16
  17#include "msm_drv.h"
  18#include "dpu_kms.h"
  19#include "dpu_hwio.h"
  20#include "dpu_hw_catalog.h"
  21#include "dpu_hw_intf.h"
  22#include "dpu_hw_ctl.h"
  23#include "dpu_hw_dspp.h"
  24#include "dpu_formats.h"
  25#include "dpu_encoder_phys.h"
  26#include "dpu_crtc.h"
  27#include "dpu_trace.h"
  28#include "dpu_core_irq.h"
  29
  30#define DPU_DEBUG_ENC(e, fmt, ...) DPU_DEBUG("enc%d " fmt,\
  31		(e) ? (e)->base.base.id : -1, ##__VA_ARGS__)
  32
  33#define DPU_ERROR_ENC(e, fmt, ...) DPU_ERROR("enc%d " fmt,\
  34		(e) ? (e)->base.base.id : -1, ##__VA_ARGS__)
  35
  36#define DPU_DEBUG_PHYS(p, fmt, ...) DPU_DEBUG("enc%d intf%d pp%d " fmt,\
  37		(p) ? (p)->parent->base.id : -1, \
  38		(p) ? (p)->intf_idx - INTF_0 : -1, \
  39		(p) ? ((p)->hw_pp ? (p)->hw_pp->idx - PINGPONG_0 : -1) : -1, \
  40		##__VA_ARGS__)
  41
  42#define DPU_ERROR_PHYS(p, fmt, ...) DPU_ERROR("enc%d intf%d pp%d " fmt,\
  43		(p) ? (p)->parent->base.id : -1, \
  44		(p) ? (p)->intf_idx - INTF_0 : -1, \
  45		(p) ? ((p)->hw_pp ? (p)->hw_pp->idx - PINGPONG_0 : -1) : -1, \
  46		##__VA_ARGS__)
  47
  48/*
   49 * Two to anticipate panels that can do cmd/vid dynamic switching.
   50 * The plan is to create all possible physical encoder types and switch
   51 * between them at runtime.
  52 */
  53#define NUM_PHYS_ENCODER_TYPES 2
  54
  55#define MAX_PHYS_ENCODERS_PER_VIRTUAL \
  56	(MAX_H_TILES_PER_DISPLAY * NUM_PHYS_ENCODER_TYPES)
  57
  58#define MAX_CHANNELS_PER_ENC 2
  59
  60#define IDLE_SHORT_TIMEOUT	1
  61
  62#define MAX_HDISPLAY_SPLIT 1080
  63
  64/* timeout in frames waiting for frame done */
  65#define DPU_ENCODER_FRAME_DONE_TIMEOUT_FRAMES 5
  66
  67/**
  68 * enum dpu_enc_rc_events - events for resource control state machine
  69 * @DPU_ENC_RC_EVENT_KICKOFF:
  70 *	This event happens at NORMAL priority.
  71 *	Event that signals the start of the transfer. When this event is
  72 *	received, enable MDP/DSI core clocks. Regardless of the previous
  73 *	state, the resource should be in ON state at the end of this event.
  74 * @DPU_ENC_RC_EVENT_FRAME_DONE:
  75 *	This event happens at INTERRUPT level.
  76 *	Event signals the end of the data transfer after the PP FRAME_DONE
  77 *	event. At the end of this event, a delayed work is scheduled to go to
  78 *	IDLE_PC state after IDLE_TIMEOUT time.
  79 * @DPU_ENC_RC_EVENT_PRE_STOP:
  80 *	This event happens at NORMAL priority.
   81 *	This event, when received during the ON state, leaves the RC state
  82 *	in the PRE_OFF state. It should be followed by the STOP event as
  83 *	part of encoder disable.
  84 *	If received during IDLE or OFF states, it will do nothing.
  85 * @DPU_ENC_RC_EVENT_STOP:
  86 *	This event happens at NORMAL priority.
  87 *	When this event is received, disable all the MDP/DSI core clocks, and
  88 *	disable IRQs. It should be called from the PRE_OFF or IDLE states.
  89 *	IDLE is expected when IDLE_PC has run, and PRE_OFF did nothing.
  90 *	PRE_OFF is expected when PRE_STOP was executed during the ON state.
  91 *	Resource state should be in OFF at the end of the event.
  92 * @DPU_ENC_RC_EVENT_ENTER_IDLE:
  93 *	This event happens at NORMAL priority from a work item.
  94 *	Event signals that there were no frame updates for IDLE_TIMEOUT time.
  95 *	This would disable MDP/DSI core clocks and change the resource state
  96 *	to IDLE.
  97 */
  98enum dpu_enc_rc_events {
  99	DPU_ENC_RC_EVENT_KICKOFF = 1,
 100	DPU_ENC_RC_EVENT_FRAME_DONE,
 101	DPU_ENC_RC_EVENT_PRE_STOP,
 102	DPU_ENC_RC_EVENT_STOP,
 103	DPU_ENC_RC_EVENT_ENTER_IDLE
 104};
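
/*
 * One possible command-mode sequence through the events above:
 *
 *   KICKOFF     -> clocks on,            rc_state = ON
 *   FRAME_DONE  -> schedule idle work (fires after IDLE_TIMEOUT)
 *   ENTER_IDLE  -> clocks off,           rc_state = IDLE
 *   KICKOFF     -> clocks back on,       rc_state = ON
 *   PRE_STOP    -> encoder disable path, rc_state = PRE_OFF
 *   STOP        -> clocks/IRQs off,      rc_state = OFF
 */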
 105
 106/*
 107 * enum dpu_enc_rc_states - states that the resource control maintains
 108 * @DPU_ENC_RC_STATE_OFF: Resource is in OFF state
 109 * @DPU_ENC_RC_STATE_PRE_OFF: Resource is transitioning to OFF state
 110 * @DPU_ENC_RC_STATE_ON: Resource is in ON state
 111 * @DPU_ENC_RC_STATE_MODESET: Resource is in modeset state
 112 * @DPU_ENC_RC_STATE_IDLE: Resource is in IDLE state
 113 */
 114enum dpu_enc_rc_states {
 115	DPU_ENC_RC_STATE_OFF,
 116	DPU_ENC_RC_STATE_PRE_OFF,
 117	DPU_ENC_RC_STATE_ON,
 118	DPU_ENC_RC_STATE_IDLE
 119};
 120
 121/**
 122 * struct dpu_encoder_virt - virtual encoder. Container of one or more physical
 123 *	encoders. Virtual encoder manages one "logical" display. Physical
 124 *	encoders manage one intf block, tied to a specific panel/sub-panel.
 125 *	Virtual encoder defers as much as possible to the physical encoders.
 126 *	Virtual encoder registers itself with the DRM Framework as the encoder.
 127 * @base:		drm_encoder base class for registration with DRM
 128 * @enc_spinlock:	Virtual-Encoder-Wide Spin Lock for IRQ purposes
 129 * @bus_scaling_client:	Client handle to the bus scaling interface
 130 * @enabled:		True if the encoder is active, protected by enc_lock
 131 * @num_phys_encs:	Actual number of physical encoders contained.
 132 * @phys_encs:		Container of physical encoders managed.
  133 * @cur_master:		Pointer to the current master in this mode. Optimization:
  134 *			only valid after enable, cleared at disable.
  135 * @hw_pp:		Handle to the pingpong blocks used for the display. The number
  136 *			of pingpong blocks can differ from num_phys_encs.
  137 * @intfs_swapped:	Whether or not the phys_enc interfaces have been swapped
 138 *			for partial update right-only cases, such as pingpong
 139 *			split where virtual pingpong does not generate IRQs
 140 * @crtc:		Pointer to the currently assigned crtc. Normally you
 141 *			would use crtc->state->encoder_mask to determine the
 142 *			link between encoder/crtc. However in this case we need
 143 *			to track crtc in the disable() hook which is called
 144 *			_after_ encoder_mask is cleared.
 145 * @crtc_kickoff_cb:		Callback into CRTC that will flush & start
 146 *				all CTL paths
 147 * @crtc_kickoff_cb_data:	Opaque user data given to crtc_kickoff_cb
 148 * @debugfs_root:		Debug file system root file node
 149 * @enc_lock:			Lock around physical encoder
 150 *				create/destroy/enable/disable
 151 * @frame_busy_mask:		Bitmask tracking which phys_enc we are still
 152 *				busy processing current command.
 153 *				Bit0 = phys_encs[0] etc.
 154 * @crtc_frame_event_cb:	callback handler for frame event
 155 * @crtc_frame_event_cb_data:	callback handler private data
 156 * @frame_done_timeout_ms:	frame done timeout in ms
 157 * @frame_done_timer:		watchdog timer for frame done event
 158 * @vsync_event_timer:		vsync timer
 159 * @disp_info:			local copy of msm_display_info struct
  160 * @idle_pc_supported:		indicate if idle power collapse is supported
 161 * @rc_lock:			resource control mutex lock to protect
 162 *				virt encoder over various state changes
 163 * @rc_state:			resource controller state
 164 * @delayed_off_work:		delayed worker to schedule disabling of
 165 *				clks and resources after IDLE_TIMEOUT time.
 166 * @vsync_event_work:		worker to handle vsync event for autorefresh
 167 * @topology:                   topology of the display
 168 * @idle_timeout:		idle timeout duration in milliseconds
 169 */
 170struct dpu_encoder_virt {
 171	struct drm_encoder base;
 172	spinlock_t enc_spinlock;
 173	uint32_t bus_scaling_client;
 174
 175	bool enabled;
 176
 177	unsigned int num_phys_encs;
 178	struct dpu_encoder_phys *phys_encs[MAX_PHYS_ENCODERS_PER_VIRTUAL];
 179	struct dpu_encoder_phys *cur_master;
 180	struct dpu_encoder_phys *cur_slave;
 181	struct dpu_hw_pingpong *hw_pp[MAX_CHANNELS_PER_ENC];
 182
 183	bool intfs_swapped;
 184
 185	struct drm_crtc *crtc;
 186
 187	struct dentry *debugfs_root;
 188	struct mutex enc_lock;
 189	DECLARE_BITMAP(frame_busy_mask, MAX_PHYS_ENCODERS_PER_VIRTUAL);
 190	void (*crtc_frame_event_cb)(void *, u32 event);
 191	void *crtc_frame_event_cb_data;
 192
 193	atomic_t frame_done_timeout_ms;
 194	struct timer_list frame_done_timer;
 195	struct timer_list vsync_event_timer;
 196
 197	struct msm_display_info disp_info;
 198
 199	bool idle_pc_supported;
 200	struct mutex rc_lock;
 201	enum dpu_enc_rc_states rc_state;
 202	struct delayed_work delayed_off_work;
 203	struct kthread_work vsync_event_work;
 204	struct msm_display_topology topology;
 205
 206	u32 idle_timeout;
 207};
 208
 209#define to_dpu_encoder_virt(x) container_of(x, struct dpu_encoder_virt, base)
 210
 211static u32 dither_matrix[DITHER_MATRIX_SZ] = {
 212	15, 7, 13, 5, 3, 11, 1, 9, 12, 4, 14, 6, 0, 8, 2, 10
 213};
 214
 215static void _dpu_encoder_setup_dither(struct dpu_hw_pingpong *hw_pp, unsigned bpc)
 216{
 217	struct dpu_hw_dither_cfg dither_cfg = { 0 };
 218
 219	if (!hw_pp->ops.setup_dither)
 220		return;
 221
 222	switch (bpc) {
 223	case 6:
 224		dither_cfg.c0_bitdepth = 6;
 225		dither_cfg.c1_bitdepth = 6;
 226		dither_cfg.c2_bitdepth = 6;
 227		dither_cfg.c3_bitdepth = 6;
 228		dither_cfg.temporal_en = 0;
 229		break;
 230	default:
 231		hw_pp->ops.setup_dither(hw_pp, NULL);
 232		return;
 233	}
 234
 235	memcpy(&dither_cfg.matrix, dither_matrix,
 236			sizeof(u32) * DITHER_MATRIX_SZ);
 237
 238	hw_pp->ops.setup_dither(hw_pp, &dither_cfg);
 239}
 240
 241void dpu_encoder_helper_report_irq_timeout(struct dpu_encoder_phys *phys_enc,
 242		enum dpu_intr_idx intr_idx)
 243{
 244	DRM_ERROR("irq timeout id=%u, intf=%d, pp=%d, intr=%d\n",
 245		  DRMID(phys_enc->parent), phys_enc->intf_idx - INTF_0,
 246		  phys_enc->hw_pp->idx - PINGPONG_0, intr_idx);
 247
 248	if (phys_enc->parent_ops->handle_frame_done)
 249		phys_enc->parent_ops->handle_frame_done(
 250				phys_enc->parent, phys_enc,
 251				DPU_ENCODER_FRAME_EVENT_ERROR);
 252}
 253
 254static int dpu_encoder_helper_wait_event_timeout(int32_t drm_id,
 255		int32_t hw_id, struct dpu_encoder_wait_info *info);
 256
 257int dpu_encoder_helper_wait_for_irq(struct dpu_encoder_phys *phys_enc,
 258		enum dpu_intr_idx intr_idx,
 259		struct dpu_encoder_wait_info *wait_info)
 260{
 261	struct dpu_encoder_irq *irq;
 262	u32 irq_status;
 263	int ret;
 264
 265	if (!wait_info || intr_idx >= INTR_IDX_MAX) {
 266		DPU_ERROR("invalid params\n");
 267		return -EINVAL;
 268	}
 269	irq = &phys_enc->irq[intr_idx];
 270
 271	/* note: do master / slave checking outside */
 272
 273	/* return EWOULDBLOCK since we know the wait isn't necessary */
 274	if (phys_enc->enable_state == DPU_ENC_DISABLED) {
 275		DRM_ERROR("encoder is disabled id=%u, intr=%d, hw=%d, irq=%d",
 276			  DRMID(phys_enc->parent), intr_idx, irq->hw_idx,
 277			  irq->irq_idx);
 278		return -EWOULDBLOCK;
 279	}
 280
 281	if (irq->irq_idx < 0) {
 282		DRM_DEBUG_KMS("skip irq wait id=%u, intr=%d, hw=%d, irq=%s",
 283			      DRMID(phys_enc->parent), intr_idx, irq->hw_idx,
 284			      irq->name);
 285		return 0;
 286	}
 287
 288	DRM_DEBUG_KMS("id=%u, intr=%d, hw=%d, irq=%d, pp=%d, pending_cnt=%d",
 289		      DRMID(phys_enc->parent), intr_idx, irq->hw_idx,
 290		      irq->irq_idx, phys_enc->hw_pp->idx - PINGPONG_0,
 291		      atomic_read(wait_info->atomic_cnt));
 292
 293	ret = dpu_encoder_helper_wait_event_timeout(
 294			DRMID(phys_enc->parent),
 295			irq->hw_idx,
 296			wait_info);
 297
 298	if (ret <= 0) {
 299		irq_status = dpu_core_irq_read(phys_enc->dpu_kms,
 300				irq->irq_idx, true);
 301		if (irq_status) {
 302			unsigned long flags;
 303
 304			DRM_DEBUG_KMS("irq not triggered id=%u, intr=%d, "
 305				      "hw=%d, irq=%d, pp=%d, atomic_cnt=%d",
 306				      DRMID(phys_enc->parent), intr_idx,
 307				      irq->hw_idx, irq->irq_idx,
 308				      phys_enc->hw_pp->idx - PINGPONG_0,
 309				      atomic_read(wait_info->atomic_cnt));
 310			local_irq_save(flags);
 311			irq->cb.func(phys_enc, irq->irq_idx);
 312			local_irq_restore(flags);
 313			ret = 0;
 314		} else {
 315			ret = -ETIMEDOUT;
 316			DRM_DEBUG_KMS("irq timeout id=%u, intr=%d, "
 317				      "hw=%d, irq=%d, pp=%d, atomic_cnt=%d",
 318				      DRMID(phys_enc->parent), intr_idx,
 319				      irq->hw_idx, irq->irq_idx,
 320				      phys_enc->hw_pp->idx - PINGPONG_0,
 321				      atomic_read(wait_info->atomic_cnt));
 322		}
 323	} else {
 324		ret = 0;
 325		trace_dpu_enc_irq_wait_success(DRMID(phys_enc->parent),
 326			intr_idx, irq->hw_idx, irq->irq_idx,
 327			phys_enc->hw_pp->idx - PINGPONG_0,
 328			atomic_read(wait_info->atomic_cnt));
 329	}
 330
 331	return ret;
 332}
 333
 334int dpu_encoder_helper_register_irq(struct dpu_encoder_phys *phys_enc,
 335		enum dpu_intr_idx intr_idx)
 336{
 337	struct dpu_encoder_irq *irq;
 338	int ret = 0;
 339
 340	if (intr_idx >= INTR_IDX_MAX) {
 341		DPU_ERROR("invalid params\n");
 342		return -EINVAL;
 343	}
 344	irq = &phys_enc->irq[intr_idx];
 345
 346	if (irq->irq_idx >= 0) {
 347		DPU_DEBUG_PHYS(phys_enc,
 348				"skipping already registered irq %s type %d\n",
 349				irq->name, irq->intr_type);
 350		return 0;
 351	}
 352
 353	irq->irq_idx = dpu_core_irq_idx_lookup(phys_enc->dpu_kms,
 354			irq->intr_type, irq->hw_idx);
 355	if (irq->irq_idx < 0) {
 356		DPU_ERROR_PHYS(phys_enc,
 357			"failed to lookup IRQ index for %s type:%d\n",
 358			irq->name, irq->intr_type);
 359		return -EINVAL;
 360	}
 361
 362	ret = dpu_core_irq_register_callback(phys_enc->dpu_kms, irq->irq_idx,
 363			&irq->cb);
 364	if (ret) {
 365		DPU_ERROR_PHYS(phys_enc,
 366			"failed to register IRQ callback for %s\n",
 367			irq->name);
 368		irq->irq_idx = -EINVAL;
 369		return ret;
 370	}
 371
 372	ret = dpu_core_irq_enable(phys_enc->dpu_kms, &irq->irq_idx, 1);
 373	if (ret) {
 374		DRM_ERROR("enable failed id=%u, intr=%d, hw=%d, irq=%d",
 375			  DRMID(phys_enc->parent), intr_idx, irq->hw_idx,
 376			  irq->irq_idx);
 377		dpu_core_irq_unregister_callback(phys_enc->dpu_kms,
 378				irq->irq_idx, &irq->cb);
 379		irq->irq_idx = -EINVAL;
 380		return ret;
 381	}
 382
 383	trace_dpu_enc_irq_register_success(DRMID(phys_enc->parent), intr_idx,
 384				irq->hw_idx, irq->irq_idx);
 385
 386	return ret;
 387}
 388
 389int dpu_encoder_helper_unregister_irq(struct dpu_encoder_phys *phys_enc,
 390		enum dpu_intr_idx intr_idx)
 391{
 392	struct dpu_encoder_irq *irq;
 393	int ret;
 394
 395	irq = &phys_enc->irq[intr_idx];
 396
  397	/* skip (but report) irqs that weren't registered */
 398	if (irq->irq_idx < 0) {
 399		DRM_ERROR("duplicate unregister id=%u, intr=%d, hw=%d, irq=%d",
 400			  DRMID(phys_enc->parent), intr_idx, irq->hw_idx,
 401			  irq->irq_idx);
 402		return 0;
 403	}
 404
 405	ret = dpu_core_irq_disable(phys_enc->dpu_kms, &irq->irq_idx, 1);
 406	if (ret) {
 407		DRM_ERROR("disable failed id=%u, intr=%d, hw=%d, irq=%d ret=%d",
 408			  DRMID(phys_enc->parent), intr_idx, irq->hw_idx,
 409			  irq->irq_idx, ret);
 410	}
 411
 412	ret = dpu_core_irq_unregister_callback(phys_enc->dpu_kms, irq->irq_idx,
 413			&irq->cb);
 414	if (ret) {
 415		DRM_ERROR("unreg cb fail id=%u, intr=%d, hw=%d, irq=%d ret=%d",
 416			  DRMID(phys_enc->parent), intr_idx, irq->hw_idx,
 417			  irq->irq_idx, ret);
 418	}
 419
 420	trace_dpu_enc_irq_unregister_success(DRMID(phys_enc->parent), intr_idx,
 421					     irq->hw_idx, irq->irq_idx);
 422
 423	irq->irq_idx = -EINVAL;
 424
 425	return 0;
 426}
 427
 428void dpu_encoder_get_hw_resources(struct drm_encoder *drm_enc,
 429				  struct dpu_encoder_hw_resources *hw_res)
 430{
 431	struct dpu_encoder_virt *dpu_enc = NULL;
 432	int i = 0;
 433
 434	dpu_enc = to_dpu_encoder_virt(drm_enc);
 435	DPU_DEBUG_ENC(dpu_enc, "\n");
 436
 437	/* Query resources used by phys encs, expected to be without overlap */
 438	memset(hw_res, 0, sizeof(*hw_res));
 439
 440	for (i = 0; i < dpu_enc->num_phys_encs; i++) {
 441		struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];
 442
 443		if (phys->ops.get_hw_resources)
 444			phys->ops.get_hw_resources(phys, hw_res);
 445	}
 446}
 447
 448static void dpu_encoder_destroy(struct drm_encoder *drm_enc)
 449{
 450	struct dpu_encoder_virt *dpu_enc = NULL;
 451	int i = 0;
 452
 453	if (!drm_enc) {
 454		DPU_ERROR("invalid encoder\n");
 455		return;
 456	}
 457
 458	dpu_enc = to_dpu_encoder_virt(drm_enc);
 459	DPU_DEBUG_ENC(dpu_enc, "\n");
 460
 461	mutex_lock(&dpu_enc->enc_lock);
 462
 463	for (i = 0; i < dpu_enc->num_phys_encs; i++) {
 464		struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];
 465
 466		if (phys->ops.destroy) {
 467			phys->ops.destroy(phys);
 468			--dpu_enc->num_phys_encs;
 469			dpu_enc->phys_encs[i] = NULL;
 470		}
 471	}
 472
 473	if (dpu_enc->num_phys_encs)
 474		DPU_ERROR_ENC(dpu_enc, "expected 0 num_phys_encs not %d\n",
 475				dpu_enc->num_phys_encs);
 476	dpu_enc->num_phys_encs = 0;
 477	mutex_unlock(&dpu_enc->enc_lock);
 478
 479	drm_encoder_cleanup(drm_enc);
 480	mutex_destroy(&dpu_enc->enc_lock);
 481}
 482
 483void dpu_encoder_helper_split_config(
 484		struct dpu_encoder_phys *phys_enc,
 485		enum dpu_intf interface)
 486{
 487	struct dpu_encoder_virt *dpu_enc;
 488	struct split_pipe_cfg cfg = { 0 };
 489	struct dpu_hw_mdp *hw_mdptop;
 490	struct msm_display_info *disp_info;
 491
 492	if (!phys_enc->hw_mdptop || !phys_enc->parent) {
 493		DPU_ERROR("invalid arg(s), encoder %d\n", phys_enc != NULL);
 494		return;
 495	}
 496
 497	dpu_enc = to_dpu_encoder_virt(phys_enc->parent);
 498	hw_mdptop = phys_enc->hw_mdptop;
 499	disp_info = &dpu_enc->disp_info;
 500
 501	if (disp_info->intf_type != DRM_MODE_ENCODER_DSI)
 502		return;
 503
  504	/*
  505	 * disable split modes since the encoder will be operating as the only
 506	 * encoder, either for the entire use case in the case of, for example,
 507	 * single DSI, or for this frame in the case of left/right only partial
 508	 * update.
 509	 */
 510	if (phys_enc->split_role == ENC_ROLE_SOLO) {
 511		if (hw_mdptop->ops.setup_split_pipe)
 512			hw_mdptop->ops.setup_split_pipe(hw_mdptop, &cfg);
 513		return;
 514	}
 515
 516	cfg.en = true;
 517	cfg.mode = phys_enc->intf_mode;
 518	cfg.intf = interface;
 519
 520	if (cfg.en && phys_enc->ops.needs_single_flush &&
 521			phys_enc->ops.needs_single_flush(phys_enc))
 522		cfg.split_flush_en = true;
 523
 524	if (phys_enc->split_role == ENC_ROLE_MASTER) {
 525		DPU_DEBUG_ENC(dpu_enc, "enable %d\n", cfg.en);
 526
 527		if (hw_mdptop->ops.setup_split_pipe)
 528			hw_mdptop->ops.setup_split_pipe(hw_mdptop, &cfg);
 529	}
 530}
 531
 532static struct msm_display_topology dpu_encoder_get_topology(
 533			struct dpu_encoder_virt *dpu_enc,
 534			struct dpu_kms *dpu_kms,
 535			struct drm_display_mode *mode)
 536{
 537	struct msm_display_topology topology = {0};
 538	int i, intf_count = 0;
 539
 540	for (i = 0; i < MAX_PHYS_ENCODERS_PER_VIRTUAL; i++)
 541		if (dpu_enc->phys_encs[i])
 542			intf_count++;
 543
 544	/* Datapath topology selection
 545	 *
 546	 * Dual display
 547	 * 2 LM, 2 INTF ( Split display using 2 interfaces)
 548	 *
 549	 * Single display
 550	 * 1 LM, 1 INTF
 551	 * 2 LM, 1 INTF (stream merge to support high resolution interfaces)
 552	 *
 553	 * Adding color blocks only to primary interface if available in
 554	 * sufficient number
 555	 */
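	/*
	 * For example, a hypothetical single 1440-wide DSI panel on hardware
	 * with 3D merge support would take the 2 LM / 1 INTF (stream merge)
	 * path below, while a 1080-wide panel stays at 1 LM / 1 INTF.
	 */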
 556	if (intf_count == 2)
 557		topology.num_lm = 2;
 558	else if (!dpu_kms->catalog->caps->has_3d_merge)
 559		topology.num_lm = 1;
 560	else
 561		topology.num_lm = (mode->hdisplay > MAX_HDISPLAY_SPLIT) ? 2 : 1;
 562
 563	if (dpu_enc->disp_info.intf_type == DRM_MODE_ENCODER_DSI) {
 564		if (dpu_kms->catalog->dspp &&
 565			(dpu_kms->catalog->dspp_count >= topology.num_lm))
 566			topology.num_dspp = topology.num_lm;
 567	}
 568
 569	topology.num_enc = 0;
 570	topology.num_intf = intf_count;
 571
 572	return topology;
 573}
 574static int dpu_encoder_virt_atomic_check(
 575		struct drm_encoder *drm_enc,
 576		struct drm_crtc_state *crtc_state,
 577		struct drm_connector_state *conn_state)
 578{
 579	struct dpu_encoder_virt *dpu_enc;
 580	struct msm_drm_private *priv;
 581	struct dpu_kms *dpu_kms;
 582	const struct drm_display_mode *mode;
 583	struct drm_display_mode *adj_mode;
 584	struct msm_display_topology topology;
 585	struct dpu_global_state *global_state;
 586	int i = 0;
 587	int ret = 0;
 588
 589	if (!drm_enc || !crtc_state || !conn_state) {
 590		DPU_ERROR("invalid arg(s), drm_enc %d, crtc/conn state %d/%d\n",
 591				drm_enc != NULL, crtc_state != NULL, conn_state != NULL);
 592		return -EINVAL;
 593	}
 594
 595	dpu_enc = to_dpu_encoder_virt(drm_enc);
 596	DPU_DEBUG_ENC(dpu_enc, "\n");
 597
 598	priv = drm_enc->dev->dev_private;
 599	dpu_kms = to_dpu_kms(priv->kms);
 600	mode = &crtc_state->mode;
 601	adj_mode = &crtc_state->adjusted_mode;
 602	global_state = dpu_kms_get_global_state(crtc_state->state);
 603	if (IS_ERR(global_state))
 604		return PTR_ERR(global_state);
 605
 606	trace_dpu_enc_atomic_check(DRMID(drm_enc));
 607
 608	/* perform atomic check on the first physical encoder (master) */
 609	for (i = 0; i < dpu_enc->num_phys_encs; i++) {
 610		struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];
 611
 612		if (phys->ops.atomic_check)
 613			ret = phys->ops.atomic_check(phys, crtc_state,
 614					conn_state);
 615		else if (phys->ops.mode_fixup)
 616			if (!phys->ops.mode_fixup(phys, mode, adj_mode))
 617				ret = -EINVAL;
 618
 619		if (ret) {
 620			DPU_ERROR_ENC(dpu_enc,
 621					"mode unsupported, phys idx %d\n", i);
 622			break;
 623		}
 624	}
 625
 626	topology = dpu_encoder_get_topology(dpu_enc, dpu_kms, adj_mode);
 627
 628	/* Reserve dynamic resources now. */
 629	if (!ret) {
 630		/*
  631		 * Release and allocate resources on every modeset.
  632		 * Don't allocate when active is false.
 633		 */
 634		if (drm_atomic_crtc_needs_modeset(crtc_state)) {
 635			dpu_rm_release(global_state, drm_enc);
 636
 637			if (!crtc_state->active_changed || crtc_state->active)
 638				ret = dpu_rm_reserve(&dpu_kms->rm, global_state,
 639						drm_enc, crtc_state, topology);
 640		}
 641	}
 642
 643	trace_dpu_enc_atomic_check_flags(DRMID(drm_enc), adj_mode->flags);
 644
 645	return ret;
 646}
 647
 648static void _dpu_encoder_update_vsync_source(struct dpu_encoder_virt *dpu_enc,
 649			struct msm_display_info *disp_info)
 650{
 651	struct dpu_vsync_source_cfg vsync_cfg = { 0 };
 652	struct msm_drm_private *priv;
 653	struct dpu_kms *dpu_kms;
 654	struct dpu_hw_mdp *hw_mdptop;
 655	struct drm_encoder *drm_enc;
 656	int i;
 657
 658	if (!dpu_enc || !disp_info) {
 659		DPU_ERROR("invalid param dpu_enc:%d or disp_info:%d\n",
 660					dpu_enc != NULL, disp_info != NULL);
 661		return;
 662	} else if (dpu_enc->num_phys_encs > ARRAY_SIZE(dpu_enc->hw_pp)) {
 663		DPU_ERROR("invalid num phys enc %d/%d\n",
 664				dpu_enc->num_phys_encs,
 665				(int) ARRAY_SIZE(dpu_enc->hw_pp));
 666		return;
 667	}
 668
 669	drm_enc = &dpu_enc->base;
  670	/* these pointers are checked in virt_enable_helper */
 671	priv = drm_enc->dev->dev_private;
 672
 673	dpu_kms = to_dpu_kms(priv->kms);
 674	hw_mdptop = dpu_kms->hw_mdp;
 675	if (!hw_mdptop) {
 676		DPU_ERROR("invalid mdptop\n");
 677		return;
 678	}
 679
 680	if (hw_mdptop->ops.setup_vsync_source &&
 681			disp_info->capabilities & MSM_DISPLAY_CAP_CMD_MODE) {
 682		for (i = 0; i < dpu_enc->num_phys_encs; i++)
 683			vsync_cfg.ppnumber[i] = dpu_enc->hw_pp[i]->idx;
 684
 685		vsync_cfg.pp_count = dpu_enc->num_phys_encs;
 686		if (disp_info->is_te_using_watchdog_timer)
 687			vsync_cfg.vsync_source = DPU_VSYNC_SOURCE_WD_TIMER_0;
 688		else
 689			vsync_cfg.vsync_source = DPU_VSYNC0_SOURCE_GPIO;
 690
 691		hw_mdptop->ops.setup_vsync_source(hw_mdptop, &vsync_cfg);
 692	}
 693}
 694
 695static void _dpu_encoder_irq_control(struct drm_encoder *drm_enc, bool enable)
 696{
 697	struct dpu_encoder_virt *dpu_enc;
 698	int i;
 699
 700	if (!drm_enc) {
 701		DPU_ERROR("invalid encoder\n");
 702		return;
 703	}
 704
 705	dpu_enc = to_dpu_encoder_virt(drm_enc);
 706
 707	DPU_DEBUG_ENC(dpu_enc, "enable:%d\n", enable);
 708	for (i = 0; i < dpu_enc->num_phys_encs; i++) {
 709		struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];
 710
 711		if (phys->ops.irq_control)
 712			phys->ops.irq_control(phys, enable);
 713	}
 714
 715}
 716
 717static void _dpu_encoder_resource_control_helper(struct drm_encoder *drm_enc,
 718		bool enable)
 719{
 720	struct msm_drm_private *priv;
 721	struct dpu_kms *dpu_kms;
 722	struct dpu_encoder_virt *dpu_enc;
 723
 724	dpu_enc = to_dpu_encoder_virt(drm_enc);
 725	priv = drm_enc->dev->dev_private;
 726	dpu_kms = to_dpu_kms(priv->kms);
 727
 728	trace_dpu_enc_rc_helper(DRMID(drm_enc), enable);
 729
 730	if (!dpu_enc->cur_master) {
 731		DPU_ERROR("encoder master not set\n");
 732		return;
 733	}
 734
 735	if (enable) {
 736		/* enable DPU core clks */
 737		pm_runtime_get_sync(&dpu_kms->pdev->dev);
 738
 739		/* enable all the irq */
 740		_dpu_encoder_irq_control(drm_enc, true);
 741
 742	} else {
 743		/* disable all the irq */
 744		_dpu_encoder_irq_control(drm_enc, false);
 745
 746		/* disable DPU core clks */
 747		pm_runtime_put_sync(&dpu_kms->pdev->dev);
 748	}
 749
 750}
 751
 752static int dpu_encoder_resource_control(struct drm_encoder *drm_enc,
 753		u32 sw_event)
 754{
 755	struct dpu_encoder_virt *dpu_enc;
 756	struct msm_drm_private *priv;
 757	bool is_vid_mode = false;
 758
 759	if (!drm_enc || !drm_enc->dev || !drm_enc->crtc) {
 760		DPU_ERROR("invalid parameters\n");
 761		return -EINVAL;
 762	}
 763	dpu_enc = to_dpu_encoder_virt(drm_enc);
 764	priv = drm_enc->dev->dev_private;
 765	is_vid_mode = dpu_enc->disp_info.capabilities &
 766						MSM_DISPLAY_CAP_VID_MODE;
 767
 768	/*
  769	 * when idle_pc is not supported, process only KICKOFF, PRE_STOP and STOP
  770	 * events and return early for other events (i.e. wb display).
 771	 */
 772	if (!dpu_enc->idle_pc_supported &&
 773			(sw_event != DPU_ENC_RC_EVENT_KICKOFF &&
 774			sw_event != DPU_ENC_RC_EVENT_STOP &&
 775			sw_event != DPU_ENC_RC_EVENT_PRE_STOP))
 776		return 0;
 777
 778	trace_dpu_enc_rc(DRMID(drm_enc), sw_event, dpu_enc->idle_pc_supported,
 779			 dpu_enc->rc_state, "begin");
 780
 781	switch (sw_event) {
 782	case DPU_ENC_RC_EVENT_KICKOFF:
 783		/* cancel delayed off work, if any */
 784		if (cancel_delayed_work_sync(&dpu_enc->delayed_off_work))
 785			DPU_DEBUG_ENC(dpu_enc, "sw_event:%d, work cancelled\n",
 786					sw_event);
 787
 788		mutex_lock(&dpu_enc->rc_lock);
 789
 790		/* return if the resource control is already in ON state */
 791		if (dpu_enc->rc_state == DPU_ENC_RC_STATE_ON) {
 792			DRM_DEBUG_KMS("id;%u, sw_event:%d, rc in ON state\n",
 793				      DRMID(drm_enc), sw_event);
 794			mutex_unlock(&dpu_enc->rc_lock);
 795			return 0;
 796		} else if (dpu_enc->rc_state != DPU_ENC_RC_STATE_OFF &&
 797				dpu_enc->rc_state != DPU_ENC_RC_STATE_IDLE) {
 798			DRM_DEBUG_KMS("id;%u, sw_event:%d, rc in state %d\n",
 799				      DRMID(drm_enc), sw_event,
 800				      dpu_enc->rc_state);
 801			mutex_unlock(&dpu_enc->rc_lock);
 802			return -EINVAL;
 803		}
 804
 805		if (is_vid_mode && dpu_enc->rc_state == DPU_ENC_RC_STATE_IDLE)
 806			_dpu_encoder_irq_control(drm_enc, true);
 807		else
 808			_dpu_encoder_resource_control_helper(drm_enc, true);
 809
 810		dpu_enc->rc_state = DPU_ENC_RC_STATE_ON;
 811
 812		trace_dpu_enc_rc(DRMID(drm_enc), sw_event,
 813				 dpu_enc->idle_pc_supported, dpu_enc->rc_state,
 814				 "kickoff");
 815
 816		mutex_unlock(&dpu_enc->rc_lock);
 817		break;
 818
 819	case DPU_ENC_RC_EVENT_FRAME_DONE:
 820		/*
 821		 * mutex lock is not used as this event happens at interrupt
  822		 * context. And locking is not required as the other events
  823		 * like KICKOFF and STOP do a wait-for-idle before executing
 824		 * the resource_control
 825		 */
 826		if (dpu_enc->rc_state != DPU_ENC_RC_STATE_ON) {
 827			DRM_DEBUG_KMS("id:%d, sw_event:%d,rc:%d-unexpected\n",
 828				      DRMID(drm_enc), sw_event,
 829				      dpu_enc->rc_state);
 830			return -EINVAL;
 831		}
 832
 833		/*
 834		 * schedule off work item only when there are no
 835		 * frames pending
 836		 */
 837		if (dpu_crtc_frame_pending(drm_enc->crtc) > 1) {
 838			DRM_DEBUG_KMS("id:%d skip schedule work\n",
 839				      DRMID(drm_enc));
 840			return 0;
 841		}
 842
 843		queue_delayed_work(priv->wq, &dpu_enc->delayed_off_work,
 844				   msecs_to_jiffies(dpu_enc->idle_timeout));
 845
 846		trace_dpu_enc_rc(DRMID(drm_enc), sw_event,
 847				 dpu_enc->idle_pc_supported, dpu_enc->rc_state,
 848				 "frame done");
 849		break;
 850
 851	case DPU_ENC_RC_EVENT_PRE_STOP:
 852		/* cancel delayed off work, if any */
 853		if (cancel_delayed_work_sync(&dpu_enc->delayed_off_work))
 854			DPU_DEBUG_ENC(dpu_enc, "sw_event:%d, work cancelled\n",
 855					sw_event);
 856
 857		mutex_lock(&dpu_enc->rc_lock);
 858
 859		if (is_vid_mode &&
 860			  dpu_enc->rc_state == DPU_ENC_RC_STATE_IDLE) {
 861			_dpu_encoder_irq_control(drm_enc, true);
 862		}
 863		/* skip if is already OFF or IDLE, resources are off already */
 864		else if (dpu_enc->rc_state == DPU_ENC_RC_STATE_OFF ||
 865				dpu_enc->rc_state == DPU_ENC_RC_STATE_IDLE) {
 866			DRM_DEBUG_KMS("id:%u, sw_event:%d, rc in %d state\n",
 867				      DRMID(drm_enc), sw_event,
 868				      dpu_enc->rc_state);
 869			mutex_unlock(&dpu_enc->rc_lock);
 870			return 0;
 871		}
 872
 873		dpu_enc->rc_state = DPU_ENC_RC_STATE_PRE_OFF;
 874
 875		trace_dpu_enc_rc(DRMID(drm_enc), sw_event,
 876				 dpu_enc->idle_pc_supported, dpu_enc->rc_state,
 877				 "pre stop");
 878
 879		mutex_unlock(&dpu_enc->rc_lock);
 880		break;
 881
 882	case DPU_ENC_RC_EVENT_STOP:
 883		mutex_lock(&dpu_enc->rc_lock);
 884
 885		/* return if the resource control is already in OFF state */
 886		if (dpu_enc->rc_state == DPU_ENC_RC_STATE_OFF) {
 887			DRM_DEBUG_KMS("id: %u, sw_event:%d, rc in OFF state\n",
 888				      DRMID(drm_enc), sw_event);
 889			mutex_unlock(&dpu_enc->rc_lock);
 890			return 0;
 891		} else if (dpu_enc->rc_state == DPU_ENC_RC_STATE_ON) {
 892			DRM_ERROR("id: %u, sw_event:%d, rc in state %d\n",
 893				  DRMID(drm_enc), sw_event, dpu_enc->rc_state);
 894			mutex_unlock(&dpu_enc->rc_lock);
 895			return -EINVAL;
 896		}
 897
  898		/*
  899		 * expect to arrive here only in either the IDLE or PRE_OFF state;
  900		 * in the IDLE state the resources are already disabled
 901		 */
 902		if (dpu_enc->rc_state == DPU_ENC_RC_STATE_PRE_OFF)
 903			_dpu_encoder_resource_control_helper(drm_enc, false);
 904
 905		dpu_enc->rc_state = DPU_ENC_RC_STATE_OFF;
 906
 907		trace_dpu_enc_rc(DRMID(drm_enc), sw_event,
 908				 dpu_enc->idle_pc_supported, dpu_enc->rc_state,
 909				 "stop");
 910
 911		mutex_unlock(&dpu_enc->rc_lock);
 912		break;
 913
 914	case DPU_ENC_RC_EVENT_ENTER_IDLE:
 915		mutex_lock(&dpu_enc->rc_lock);
 916
 917		if (dpu_enc->rc_state != DPU_ENC_RC_STATE_ON) {
 918			DRM_ERROR("id: %u, sw_event:%d, rc:%d !ON state\n",
 919				  DRMID(drm_enc), sw_event, dpu_enc->rc_state);
 920			mutex_unlock(&dpu_enc->rc_lock);
 921			return 0;
 922		}
 923
 924		/*
 925		 * if we are in ON but a frame was just kicked off,
 926		 * ignore the IDLE event, it's probably a stale timer event
 927		 */
 928		if (dpu_enc->frame_busy_mask[0]) {
 929			DRM_ERROR("id:%u, sw_event:%d, rc:%d frame pending\n",
 930				  DRMID(drm_enc), sw_event, dpu_enc->rc_state);
 931			mutex_unlock(&dpu_enc->rc_lock);
 932			return 0;
 933		}
 934
 935		if (is_vid_mode)
 936			_dpu_encoder_irq_control(drm_enc, false);
 937		else
 938			_dpu_encoder_resource_control_helper(drm_enc, false);
 939
 940		dpu_enc->rc_state = DPU_ENC_RC_STATE_IDLE;
 941
 942		trace_dpu_enc_rc(DRMID(drm_enc), sw_event,
 943				 dpu_enc->idle_pc_supported, dpu_enc->rc_state,
 944				 "idle");
 945
 946		mutex_unlock(&dpu_enc->rc_lock);
 947		break;
 948
 949	default:
 950		DRM_ERROR("id:%u, unexpected sw_event: %d\n", DRMID(drm_enc),
 951			  sw_event);
 952		trace_dpu_enc_rc(DRMID(drm_enc), sw_event,
 953				 dpu_enc->idle_pc_supported, dpu_enc->rc_state,
 954				 "error");
 955		break;
 956	}
 957
 958	trace_dpu_enc_rc(DRMID(drm_enc), sw_event,
 959			 dpu_enc->idle_pc_supported, dpu_enc->rc_state,
 960			 "end");
 961	return 0;
 962}
 963
 964static void dpu_encoder_virt_mode_set(struct drm_encoder *drm_enc,
 965				      struct drm_display_mode *mode,
 966				      struct drm_display_mode *adj_mode)
 967{
 968	struct dpu_encoder_virt *dpu_enc;
 969	struct msm_drm_private *priv;
 970	struct dpu_kms *dpu_kms;
 971	struct list_head *connector_list;
 972	struct drm_connector *conn = NULL, *conn_iter;
 973	struct drm_crtc *drm_crtc;
 974	struct dpu_crtc_state *cstate;
 975	struct dpu_global_state *global_state;
 976	struct msm_display_topology topology;
 977	struct dpu_hw_blk *hw_pp[MAX_CHANNELS_PER_ENC];
 978	struct dpu_hw_blk *hw_ctl[MAX_CHANNELS_PER_ENC];
 979	struct dpu_hw_blk *hw_lm[MAX_CHANNELS_PER_ENC];
 980	struct dpu_hw_blk *hw_dspp[MAX_CHANNELS_PER_ENC] = { NULL };
 981	int num_lm, num_ctl, num_pp, num_dspp;
 982	int i, j;
 983
 984	if (!drm_enc) {
 985		DPU_ERROR("invalid encoder\n");
 986		return;
 987	}
 988
 989	dpu_enc = to_dpu_encoder_virt(drm_enc);
 990	DPU_DEBUG_ENC(dpu_enc, "\n");
 991
 992	priv = drm_enc->dev->dev_private;
 993	dpu_kms = to_dpu_kms(priv->kms);
 994	connector_list = &dpu_kms->dev->mode_config.connector_list;
 995
 996	global_state = dpu_kms_get_existing_global_state(dpu_kms);
 997	if (IS_ERR_OR_NULL(global_state)) {
 998		DPU_ERROR("Failed to get global state");
 999		return;
1000	}
1001
1002	trace_dpu_enc_mode_set(DRMID(drm_enc));
1003
1004	list_for_each_entry(conn_iter, connector_list, head)
1005		if (conn_iter->encoder == drm_enc)
1006			conn = conn_iter;
1007
1008	if (!conn) {
1009		DPU_ERROR_ENC(dpu_enc, "failed to find attached connector\n");
1010		return;
1011	} else if (!conn->state) {
1012		DPU_ERROR_ENC(dpu_enc, "invalid connector state\n");
1013		return;
1014	}
1015
1016	drm_for_each_crtc(drm_crtc, drm_enc->dev)
1017		if (drm_crtc->state->encoder_mask & drm_encoder_mask(drm_enc))
1018			break;
1019
1020	topology = dpu_encoder_get_topology(dpu_enc, dpu_kms, adj_mode);
1021
 1022	/* Query resources that have been reserved in the atomic check step. */
1023	num_pp = dpu_rm_get_assigned_resources(&dpu_kms->rm, global_state,
1024		drm_enc->base.id, DPU_HW_BLK_PINGPONG, hw_pp,
1025		ARRAY_SIZE(hw_pp));
1026	num_ctl = dpu_rm_get_assigned_resources(&dpu_kms->rm, global_state,
1027		drm_enc->base.id, DPU_HW_BLK_CTL, hw_ctl, ARRAY_SIZE(hw_ctl));
1028	num_lm = dpu_rm_get_assigned_resources(&dpu_kms->rm, global_state,
1029		drm_enc->base.id, DPU_HW_BLK_LM, hw_lm, ARRAY_SIZE(hw_lm));
1030	num_dspp = dpu_rm_get_assigned_resources(&dpu_kms->rm, global_state,
1031		drm_enc->base.id, DPU_HW_BLK_DSPP, hw_dspp,
1032		ARRAY_SIZE(hw_dspp));
1033
1034	for (i = 0; i < MAX_CHANNELS_PER_ENC; i++)
1035		dpu_enc->hw_pp[i] = i < num_pp ? to_dpu_hw_pingpong(hw_pp[i])
1036						: NULL;
1037
1038	cstate = to_dpu_crtc_state(drm_crtc->state);
1039
1040	for (i = 0; i < num_lm; i++) {
1041		int ctl_idx = (i < num_ctl) ? i : (num_ctl-1);
1042
1043		cstate->mixers[i].hw_lm = to_dpu_hw_mixer(hw_lm[i]);
1044		cstate->mixers[i].lm_ctl = to_dpu_hw_ctl(hw_ctl[ctl_idx]);
1045		cstate->mixers[i].hw_dspp = to_dpu_hw_dspp(hw_dspp[i]);
1046	}
1047
1048	cstate->num_mixers = num_lm;
1049
1050	for (i = 0; i < dpu_enc->num_phys_encs; i++) {
1051		int num_blk;
1052		struct dpu_hw_blk *hw_blk[MAX_CHANNELS_PER_ENC];
1053		struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];
1054
1055		if (!dpu_enc->hw_pp[i]) {
1056			DPU_ERROR_ENC(dpu_enc,
1057				"no pp block assigned at idx: %d\n", i);
1058			return;
1059		}
1060
1061		if (!hw_ctl[i]) {
1062			DPU_ERROR_ENC(dpu_enc,
1063				"no ctl block assigned at idx: %d\n", i);
1064			return;
1065		}
1066
1067		phys->hw_pp = dpu_enc->hw_pp[i];
1068		phys->hw_ctl = to_dpu_hw_ctl(hw_ctl[i]);
1069
1070		num_blk = dpu_rm_get_assigned_resources(&dpu_kms->rm,
1071			global_state, drm_enc->base.id, DPU_HW_BLK_INTF,
1072			hw_blk, ARRAY_SIZE(hw_blk));
1073		for (j = 0; j < num_blk; j++) {
1074			struct dpu_hw_intf *hw_intf;
1075
 1076			hw_intf = to_dpu_hw_intf(hw_blk[j]);
1077			if (hw_intf->idx == phys->intf_idx)
1078				phys->hw_intf = hw_intf;
1079		}
1080
1081		if (!phys->hw_intf) {
1082			DPU_ERROR_ENC(dpu_enc,
1083				      "no intf block assigned at idx: %d\n", i);
1084			return;
1085		}
1086
1087		phys->connector = conn->state->connector;
1088		if (phys->ops.mode_set)
1089			phys->ops.mode_set(phys, mode, adj_mode);
1090	}
1091}
1092
1093static void _dpu_encoder_virt_enable_helper(struct drm_encoder *drm_enc)
1094{
1095	struct dpu_encoder_virt *dpu_enc = NULL;
1096	struct msm_drm_private *priv;
1097	int i;
1098
1099	if (!drm_enc || !drm_enc->dev) {
1100		DPU_ERROR("invalid parameters\n");
1101		return;
1102	}
1103
1104	priv = drm_enc->dev->dev_private;
1105
1106	dpu_enc = to_dpu_encoder_virt(drm_enc);
1107	if (!dpu_enc || !dpu_enc->cur_master) {
1108		DPU_ERROR("invalid dpu encoder/master\n");
1109		return;
1110	}
1111
1112	_dpu_encoder_update_vsync_source(dpu_enc, &dpu_enc->disp_info);
1113
1114	if (dpu_enc->disp_info.intf_type == DRM_MODE_ENCODER_DSI &&
1115			!WARN_ON(dpu_enc->num_phys_encs == 0)) {
1116		unsigned bpc = dpu_enc->phys_encs[0]->connector->display_info.bpc;
1117		for (i = 0; i < MAX_CHANNELS_PER_ENC; i++) {
1118			if (!dpu_enc->hw_pp[i])
1119				continue;
1120			_dpu_encoder_setup_dither(dpu_enc->hw_pp[i], bpc);
1121		}
1122	}
1123}
1124
1125void dpu_encoder_virt_runtime_resume(struct drm_encoder *drm_enc)
1126{
1127	struct dpu_encoder_virt *dpu_enc = to_dpu_encoder_virt(drm_enc);
1128
1129	mutex_lock(&dpu_enc->enc_lock);
1130
1131	if (!dpu_enc->enabled)
1132		goto out;
1133
1134	if (dpu_enc->cur_slave && dpu_enc->cur_slave->ops.restore)
1135		dpu_enc->cur_slave->ops.restore(dpu_enc->cur_slave);
1136	if (dpu_enc->cur_master && dpu_enc->cur_master->ops.restore)
1137		dpu_enc->cur_master->ops.restore(dpu_enc->cur_master);
1138
1139	_dpu_encoder_virt_enable_helper(drm_enc);
1140
1141out:
1142	mutex_unlock(&dpu_enc->enc_lock);
1143}
1144
1145static void dpu_encoder_virt_enable(struct drm_encoder *drm_enc)
1146{
1147	struct dpu_encoder_virt *dpu_enc = NULL;
1148	int ret = 0;
1149	struct drm_display_mode *cur_mode = NULL;
1150
1151	if (!drm_enc) {
1152		DPU_ERROR("invalid encoder\n");
1153		return;
1154	}
1155	dpu_enc = to_dpu_encoder_virt(drm_enc);
1156
1157	mutex_lock(&dpu_enc->enc_lock);
1158	cur_mode = &dpu_enc->base.crtc->state->adjusted_mode;
1159
1160	trace_dpu_enc_enable(DRMID(drm_enc), cur_mode->hdisplay,
1161			     cur_mode->vdisplay);
1162
1163	/* always enable slave encoder before master */
1164	if (dpu_enc->cur_slave && dpu_enc->cur_slave->ops.enable)
1165		dpu_enc->cur_slave->ops.enable(dpu_enc->cur_slave);
1166
1167	if (dpu_enc->cur_master && dpu_enc->cur_master->ops.enable)
1168		dpu_enc->cur_master->ops.enable(dpu_enc->cur_master);
1169
1170	ret = dpu_encoder_resource_control(drm_enc, DPU_ENC_RC_EVENT_KICKOFF);
1171	if (ret) {
1172		DPU_ERROR_ENC(dpu_enc, "dpu resource control failed: %d\n",
1173				ret);
1174		goto out;
1175	}
1176
1177	_dpu_encoder_virt_enable_helper(drm_enc);
1178
1179	dpu_enc->enabled = true;
1180
1181out:
1182	mutex_unlock(&dpu_enc->enc_lock);
1183}
1184
1185static void dpu_encoder_virt_disable(struct drm_encoder *drm_enc)
1186{
1187	struct dpu_encoder_virt *dpu_enc = NULL;
1188	struct msm_drm_private *priv;
1189	struct dpu_kms *dpu_kms;
1190	int i = 0;
1191
1192	if (!drm_enc) {
1193		DPU_ERROR("invalid encoder\n");
1194		return;
1195	} else if (!drm_enc->dev) {
1196		DPU_ERROR("invalid dev\n");
1197		return;
1198	}
1199
1200	dpu_enc = to_dpu_encoder_virt(drm_enc);
1201	DPU_DEBUG_ENC(dpu_enc, "\n");
1202
1203	mutex_lock(&dpu_enc->enc_lock);
1204	dpu_enc->enabled = false;
1205
1206	priv = drm_enc->dev->dev_private;
1207	dpu_kms = to_dpu_kms(priv->kms);
1208
1209	trace_dpu_enc_disable(DRMID(drm_enc));
1210
1211	/* wait for idle */
1212	dpu_encoder_wait_for_event(drm_enc, MSM_ENC_TX_COMPLETE);
1213
1214	dpu_encoder_resource_control(drm_enc, DPU_ENC_RC_EVENT_PRE_STOP);
1215
1216	for (i = 0; i < dpu_enc->num_phys_encs; i++) {
1217		struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];
1218
1219		if (phys->ops.disable)
1220			phys->ops.disable(phys);
1221	}
1222
1223	/* after phys waits for frame-done, should be no more frames pending */
1224	if (atomic_xchg(&dpu_enc->frame_done_timeout_ms, 0)) {
1225		DPU_ERROR("enc%d timeout pending\n", drm_enc->base.id);
1226		del_timer_sync(&dpu_enc->frame_done_timer);
1227	}
1228
1229	dpu_encoder_resource_control(drm_enc, DPU_ENC_RC_EVENT_STOP);
1230
1231	for (i = 0; i < dpu_enc->num_phys_encs; i++) {
1232		dpu_enc->phys_encs[i]->connector = NULL;
1233	}
1234
1235	DPU_DEBUG_ENC(dpu_enc, "encoder disabled\n");
1236
1237	mutex_unlock(&dpu_enc->enc_lock);
1238}
1239
1240static enum dpu_intf dpu_encoder_get_intf(struct dpu_mdss_cfg *catalog,
1241		enum dpu_intf_type type, u32 controller_id)
1242{
1243	int i = 0;
1244
1245	for (i = 0; i < catalog->intf_count; i++) {
1246		if (catalog->intf[i].type == type
1247		    && catalog->intf[i].controller_id == controller_id) {
1248			return catalog->intf[i].id;
1249		}
1250	}
1251
1252	return INTF_MAX;
1253}
1254
1255static void dpu_encoder_vblank_callback(struct drm_encoder *drm_enc,
1256		struct dpu_encoder_phys *phy_enc)
1257{
1258	struct dpu_encoder_virt *dpu_enc = NULL;
1259	unsigned long lock_flags;
1260
1261	if (!drm_enc || !phy_enc)
1262		return;
1263
1264	DPU_ATRACE_BEGIN("encoder_vblank_callback");
1265	dpu_enc = to_dpu_encoder_virt(drm_enc);
1266
1267	spin_lock_irqsave(&dpu_enc->enc_spinlock, lock_flags);
1268	if (dpu_enc->crtc)
1269		dpu_crtc_vblank_callback(dpu_enc->crtc);
1270	spin_unlock_irqrestore(&dpu_enc->enc_spinlock, lock_flags);
1271
1272	atomic_inc(&phy_enc->vsync_cnt);
1273	DPU_ATRACE_END("encoder_vblank_callback");
1274}
1275
1276static void dpu_encoder_underrun_callback(struct drm_encoder *drm_enc,
1277		struct dpu_encoder_phys *phy_enc)
1278{
1279	if (!phy_enc)
1280		return;
1281
1282	DPU_ATRACE_BEGIN("encoder_underrun_callback");
1283	atomic_inc(&phy_enc->underrun_cnt);
1284	trace_dpu_enc_underrun_cb(DRMID(drm_enc),
1285				  atomic_read(&phy_enc->underrun_cnt));
1286	DPU_ATRACE_END("encoder_underrun_callback");
1287}
1288
1289void dpu_encoder_assign_crtc(struct drm_encoder *drm_enc, struct drm_crtc *crtc)
1290{
1291	struct dpu_encoder_virt *dpu_enc = to_dpu_encoder_virt(drm_enc);
1292	unsigned long lock_flags;
1293
1294	spin_lock_irqsave(&dpu_enc->enc_spinlock, lock_flags);
1295	/* crtc should always be cleared before re-assigning */
1296	WARN_ON(crtc && dpu_enc->crtc);
1297	dpu_enc->crtc = crtc;
1298	spin_unlock_irqrestore(&dpu_enc->enc_spinlock, lock_flags);
1299}
1300
1301void dpu_encoder_toggle_vblank_for_crtc(struct drm_encoder *drm_enc,
1302					struct drm_crtc *crtc, bool enable)
1303{
1304	struct dpu_encoder_virt *dpu_enc = to_dpu_encoder_virt(drm_enc);
1305	unsigned long lock_flags;
1306	int i;
1307
1308	trace_dpu_enc_vblank_cb(DRMID(drm_enc), enable);
1309
1310	spin_lock_irqsave(&dpu_enc->enc_spinlock, lock_flags);
1311	if (dpu_enc->crtc != crtc) {
1312		spin_unlock_irqrestore(&dpu_enc->enc_spinlock, lock_flags);
1313		return;
1314	}
1315	spin_unlock_irqrestore(&dpu_enc->enc_spinlock, lock_flags);
1316
1317	for (i = 0; i < dpu_enc->num_phys_encs; i++) {
1318		struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];
1319
1320		if (phys->ops.control_vblank_irq)
1321			phys->ops.control_vblank_irq(phys, enable);
1322	}
1323}
1324
1325void dpu_encoder_register_frame_event_callback(struct drm_encoder *drm_enc,
1326		void (*frame_event_cb)(void *, u32 event),
1327		void *frame_event_cb_data)
1328{
1329	struct dpu_encoder_virt *dpu_enc = to_dpu_encoder_virt(drm_enc);
1330	unsigned long lock_flags;
1331	bool enable;
1332
1333	enable = frame_event_cb ? true : false;
1334
1335	if (!drm_enc) {
1336		DPU_ERROR("invalid encoder\n");
1337		return;
1338	}
1339	trace_dpu_enc_frame_event_cb(DRMID(drm_enc), enable);
1340
1341	spin_lock_irqsave(&dpu_enc->enc_spinlock, lock_flags);
1342	dpu_enc->crtc_frame_event_cb = frame_event_cb;
1343	dpu_enc->crtc_frame_event_cb_data = frame_event_cb_data;
1344	spin_unlock_irqrestore(&dpu_enc->enc_spinlock, lock_flags);
1345}
1346
1347static void dpu_encoder_frame_done_callback(
1348		struct drm_encoder *drm_enc,
1349		struct dpu_encoder_phys *ready_phys, u32 event)
1350{
1351	struct dpu_encoder_virt *dpu_enc = to_dpu_encoder_virt(drm_enc);
1352	unsigned int i;
1353
1354	if (event & (DPU_ENCODER_FRAME_EVENT_DONE
1355			| DPU_ENCODER_FRAME_EVENT_ERROR
1356			| DPU_ENCODER_FRAME_EVENT_PANEL_DEAD)) {
1357
1358		if (!dpu_enc->frame_busy_mask[0]) {
 1359			/*
1360			 * suppress frame_done without waiter,
1361			 * likely autorefresh
1362			 */
1363			trace_dpu_enc_frame_done_cb_not_busy(DRMID(drm_enc),
1364					event, ready_phys->intf_idx);
1365			return;
1366		}
1367
1368		/* One of the physical encoders has become idle */
1369		for (i = 0; i < dpu_enc->num_phys_encs; i++) {
1370			if (dpu_enc->phys_encs[i] == ready_phys) {
1371				trace_dpu_enc_frame_done_cb(DRMID(drm_enc), i,
1372						dpu_enc->frame_busy_mask[0]);
1373				clear_bit(i, dpu_enc->frame_busy_mask);
1374			}
1375		}
1376
1377		if (!dpu_enc->frame_busy_mask[0]) {
1378			atomic_set(&dpu_enc->frame_done_timeout_ms, 0);
1379			del_timer(&dpu_enc->frame_done_timer);
1380
1381			dpu_encoder_resource_control(drm_enc,
1382					DPU_ENC_RC_EVENT_FRAME_DONE);
1383
1384			if (dpu_enc->crtc_frame_event_cb)
1385				dpu_enc->crtc_frame_event_cb(
1386					dpu_enc->crtc_frame_event_cb_data,
1387					event);
1388		}
1389	} else {
1390		if (dpu_enc->crtc_frame_event_cb)
1391			dpu_enc->crtc_frame_event_cb(
1392				dpu_enc->crtc_frame_event_cb_data, event);
1393	}
1394}
1395
1396static void dpu_encoder_off_work(struct work_struct *work)
1397{
1398	struct dpu_encoder_virt *dpu_enc = container_of(work,
1399			struct dpu_encoder_virt, delayed_off_work.work);
1400
1401	if (!dpu_enc) {
1402		DPU_ERROR("invalid dpu encoder\n");
1403		return;
1404	}
1405
1406	dpu_encoder_resource_control(&dpu_enc->base,
1407						DPU_ENC_RC_EVENT_ENTER_IDLE);
1408
1409	dpu_encoder_frame_done_callback(&dpu_enc->base, NULL,
1410				DPU_ENCODER_FRAME_EVENT_IDLE);
1411}
1412
1413/**
1414 * _dpu_encoder_trigger_flush - trigger flush for a physical encoder
 1415 * @drm_enc: Pointer to drm encoder structure
 1416 * @phys: Pointer to physical encoder structure
 1417 * @extra_flush_bits: Additional bit mask to include in flush trigger
1418 */
1419static void _dpu_encoder_trigger_flush(struct drm_encoder *drm_enc,
1420		struct dpu_encoder_phys *phys, uint32_t extra_flush_bits)
1421{
1422	struct dpu_hw_ctl *ctl;
1423	int pending_kickoff_cnt;
1424	u32 ret = UINT_MAX;
1425
1426	if (!phys->hw_pp) {
1427		DPU_ERROR("invalid pingpong hw\n");
1428		return;
1429	}
1430
1431	ctl = phys->hw_ctl;
1432	if (!ctl->ops.trigger_flush) {
1433		DPU_ERROR("missing trigger cb\n");
1434		return;
1435	}
1436
1437	pending_kickoff_cnt = dpu_encoder_phys_inc_pending(phys);
1438
1439	if (extra_flush_bits && ctl->ops.update_pending_flush)
1440		ctl->ops.update_pending_flush(ctl, extra_flush_bits);
1441
1442	ctl->ops.trigger_flush(ctl);
1443
1444	if (ctl->ops.get_pending_flush)
1445		ret = ctl->ops.get_pending_flush(ctl);
1446
1447	trace_dpu_enc_trigger_flush(DRMID(drm_enc), phys->intf_idx,
1448				    pending_kickoff_cnt, ctl->idx,
1449				    extra_flush_bits, ret);
1450}
1451
1452/**
1453 * _dpu_encoder_trigger_start - trigger start for a physical encoder
 1454 * @phys: Pointer to physical encoder structure
1455 */
1456static void _dpu_encoder_trigger_start(struct dpu_encoder_phys *phys)
1457{
1458	if (!phys) {
1459		DPU_ERROR("invalid argument(s)\n");
1460		return;
1461	}
1462
1463	if (!phys->hw_pp) {
1464		DPU_ERROR("invalid pingpong hw\n");
1465		return;
1466	}
1467
1468	if (phys->ops.trigger_start && phys->enable_state != DPU_ENC_DISABLED)
1469		phys->ops.trigger_start(phys);
1470}
1471
1472void dpu_encoder_helper_trigger_start(struct dpu_encoder_phys *phys_enc)
1473{
1474	struct dpu_hw_ctl *ctl;
1475
1476	ctl = phys_enc->hw_ctl;
1477	if (ctl->ops.trigger_start) {
1478		ctl->ops.trigger_start(ctl);
1479		trace_dpu_enc_trigger_start(DRMID(phys_enc->parent), ctl->idx);
1480	}
1481}
1482
1483static int dpu_encoder_helper_wait_event_timeout(
1484		int32_t drm_id,
1485		int32_t hw_id,
1486		struct dpu_encoder_wait_info *info)
1487{
1488	int rc = 0;
1489	s64 expected_time = ktime_to_ms(ktime_get()) + info->timeout_ms;
1490	s64 jiffies = msecs_to_jiffies(info->timeout_ms);
1491	s64 time;
1492
1493	do {
1494		rc = wait_event_timeout(*(info->wq),
1495				atomic_read(info->atomic_cnt) == 0, jiffies);
1496		time = ktime_to_ms(ktime_get());
1497
1498		trace_dpu_enc_wait_event_timeout(drm_id, hw_id, rc, time,
1499						 expected_time,
1500						 atomic_read(info->atomic_cnt));
 1501	/* If we timed out, the counter is still non-zero and time remains, wait again */
1502	} while (atomic_read(info->atomic_cnt) && (rc == 0) &&
1503			(time < expected_time));
1504
1505	return rc;
1506}
1507
1508static void dpu_encoder_helper_hw_reset(struct dpu_encoder_phys *phys_enc)
1509{
1510	struct dpu_encoder_virt *dpu_enc;
1511	struct dpu_hw_ctl *ctl;
1512	int rc;
1513
1514	dpu_enc = to_dpu_encoder_virt(phys_enc->parent);
1515	ctl = phys_enc->hw_ctl;
1516
1517	if (!ctl->ops.reset)
1518		return;
1519
1520	DRM_DEBUG_KMS("id:%u ctl %d reset\n", DRMID(phys_enc->parent),
1521		      ctl->idx);
1522
1523	rc = ctl->ops.reset(ctl);
1524	if (rc)
1525		DPU_ERROR_ENC(dpu_enc, "ctl %d reset failure\n",  ctl->idx);
1526
1527	phys_enc->enable_state = DPU_ENC_ENABLED;
1528}
1529
1530/**
1531 * _dpu_encoder_kickoff_phys - handle physical encoder kickoff
1532 *	Iterate through the physical encoders and perform consolidated flush
1533 *	and/or control start triggering as needed. This is done in the virtual
1534 *	encoder rather than the individual physical ones in order to handle
1535 *	use cases that require visibility into multiple physical encoders at
1536 *	a time.
 1537 * @dpu_enc: Pointer to virtual encoder structure
1538 */
1539static void _dpu_encoder_kickoff_phys(struct dpu_encoder_virt *dpu_enc)
1540{
1541	struct dpu_hw_ctl *ctl;
1542	uint32_t i, pending_flush;
1543	unsigned long lock_flags;
1544
1545	pending_flush = 0x0;
1546
1547	/* update pending counts and trigger kickoff ctl flush atomically */
1548	spin_lock_irqsave(&dpu_enc->enc_spinlock, lock_flags);
1549
1550	/* don't perform flush/start operations for slave encoders */
1551	for (i = 0; i < dpu_enc->num_phys_encs; i++) {
1552		struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];
1553
1554		if (phys->enable_state == DPU_ENC_DISABLED)
1555			continue;
1556
1557		ctl = phys->hw_ctl;
1558
1559		/*
1560		 * This is cleared in frame_done worker, which isn't invoked
1561		 * for async commits. So don't set this for async, since it'll
1562		 * roll over to the next commit.
1563		 */
1564		if (phys->split_role != ENC_ROLE_SLAVE)
1565			set_bit(i, dpu_enc->frame_busy_mask);
1566
1567		if (!phys->ops.needs_single_flush ||
1568				!phys->ops.needs_single_flush(phys))
1569			_dpu_encoder_trigger_flush(&dpu_enc->base, phys, 0x0);
1570		else if (ctl->ops.get_pending_flush)
1571			pending_flush |= ctl->ops.get_pending_flush(ctl);
1572	}
1573
1574	/* for split flush, combine pending flush masks and send to master */
1575	if (pending_flush && dpu_enc->cur_master) {
1576		_dpu_encoder_trigger_flush(
1577				&dpu_enc->base,
1578				dpu_enc->cur_master,
1579				pending_flush);
1580	}
1581
1582	_dpu_encoder_trigger_start(dpu_enc->cur_master);
1583
1584	spin_unlock_irqrestore(&dpu_enc->enc_spinlock, lock_flags);
1585}
1586
1587void dpu_encoder_trigger_kickoff_pending(struct drm_encoder *drm_enc)
1588{
1589	struct dpu_encoder_virt *dpu_enc;
1590	struct dpu_encoder_phys *phys;
1591	unsigned int i;
1592	struct dpu_hw_ctl *ctl;
1593	struct msm_display_info *disp_info;
1594
1595	if (!drm_enc) {
1596		DPU_ERROR("invalid encoder\n");
1597		return;
1598	}
1599	dpu_enc = to_dpu_encoder_virt(drm_enc);
1600	disp_info = &dpu_enc->disp_info;
1601
1602	for (i = 0; i < dpu_enc->num_phys_encs; i++) {
1603		phys = dpu_enc->phys_encs[i];
1604
1605		ctl = phys->hw_ctl;
1606		if (ctl->ops.clear_pending_flush)
1607			ctl->ops.clear_pending_flush(ctl);
1608
1609		/* update only for command mode primary ctl */
1610		if ((phys == dpu_enc->cur_master) &&
1611		   (disp_info->capabilities & MSM_DISPLAY_CAP_CMD_MODE)
1612		    && ctl->ops.trigger_pending)
1613			ctl->ops.trigger_pending(ctl);
1614	}
1615}
1616
1617static u32 _dpu_encoder_calculate_linetime(struct dpu_encoder_virt *dpu_enc,
1618		struct drm_display_mode *mode)
1619{
1620	u64 pclk_rate;
1621	u32 pclk_period;
1622	u32 line_time;
1623
1624	/*
1625	 * For linetime calculation, only operate on master encoder.
1626	 */
1627	if (!dpu_enc->cur_master)
1628		return 0;
1629
1630	if (!dpu_enc->cur_master->ops.get_line_count) {
1631		DPU_ERROR("get_line_count function not defined\n");
1632		return 0;
1633	}
1634
1635	pclk_rate = mode->clock; /* pixel clock in kHz */
1636	if (pclk_rate == 0) {
1637		DPU_ERROR("pclk is 0, cannot calculate line time\n");
1638		return 0;
1639	}
1640
1641	pclk_period = DIV_ROUND_UP_ULL(1000000000ull, pclk_rate);
1642	if (pclk_period == 0) {
1643		DPU_ERROR("pclk period is 0\n");
1644		return 0;
1645	}
1646
1647	/*
1648	 * Line time calculation based on Pixel clock and HTOTAL.
1649	 * Final unit is in ns.
1650	 */
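	/*
	 * For instance, a hypothetical 1080p60 mode with mode->clock = 148500
	 * (kHz) and mode->htotal = 2200 gives pclk_period =
	 * DIV_ROUND_UP(10^9, 148500) = 6735 and line_time =
	 * 6735 * 2200 / 1000 = 14817 ns, i.e. roughly 14.8 us per line.
	 */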
1651	line_time = (pclk_period * mode->htotal) / 1000;
1652	if (line_time == 0) {
1653		DPU_ERROR("line time calculation is 0\n");
1654		return 0;
1655	}
1656
1657	DPU_DEBUG_ENC(dpu_enc,
1658			"clk_rate=%lldkHz, clk_period=%d, linetime=%dns\n",
1659			pclk_rate, pclk_period, line_time);
1660
1661	return line_time;
1662}
1663
1664int dpu_encoder_vsync_time(struct drm_encoder *drm_enc, ktime_t *wakeup_time)
1665{
1666	struct drm_display_mode *mode;
1667	struct dpu_encoder_virt *dpu_enc;
1668	u32 cur_line;
1669	u32 line_time;
1670	u32 vtotal, time_to_vsync;
1671	ktime_t cur_time;
1672
1673	dpu_enc = to_dpu_encoder_virt(drm_enc);
1674
1675	if (!drm_enc->crtc || !drm_enc->crtc->state) {
1676		DPU_ERROR("crtc/crtc state object is NULL\n");
1677		return -EINVAL;
1678	}
1679	mode = &drm_enc->crtc->state->adjusted_mode;
1680
1681	line_time = _dpu_encoder_calculate_linetime(dpu_enc, mode);
1682	if (!line_time)
1683		return -EINVAL;
1684
1685	cur_line = dpu_enc->cur_master->ops.get_line_count(dpu_enc->cur_master);
1686
1687	vtotal = mode->vtotal;
1688	if (cur_line >= vtotal)
1689		time_to_vsync = line_time * vtotal;
1690	else
1691		time_to_vsync = line_time * (vtotal - cur_line);
1692
1693	if (time_to_vsync == 0) {
1694		DPU_ERROR("time to vsync should not be zero, vtotal=%d\n",
1695				vtotal);
1696		return -EINVAL;
1697	}
1698
1699	cur_time = ktime_get();
1700	*wakeup_time = ktime_add_ns(cur_time, time_to_vsync);
1701
1702	DPU_DEBUG_ENC(dpu_enc,
1703			"cur_line=%u vtotal=%u time_to_vsync=%u, cur_time=%lld, wakeup_time=%lld\n",
1704			cur_line, vtotal, time_to_vsync,
1705			ktime_to_ms(cur_time),
1706			ktime_to_ms(*wakeup_time));
1707	return 0;
1708}
1709
1710static void dpu_encoder_vsync_event_handler(struct timer_list *t)
1711{
1712	struct dpu_encoder_virt *dpu_enc = from_timer(dpu_enc, t,
1713			vsync_event_timer);
1714	struct drm_encoder *drm_enc = &dpu_enc->base;
1715	struct msm_drm_private *priv;
1716	struct msm_drm_thread *event_thread;
1717
1718	if (!drm_enc->dev || !drm_enc->crtc) {
1719		DPU_ERROR("invalid parameters\n");
1720		return;
1721	}
1722
1723	priv = drm_enc->dev->dev_private;
1724
1725	if (drm_enc->crtc->index >= ARRAY_SIZE(priv->event_thread)) {
1726		DPU_ERROR("invalid crtc index\n");
1727		return;
1728	}
1729	event_thread = &priv->event_thread[drm_enc->crtc->index];
1730	if (!event_thread) {
1731		DPU_ERROR("event_thread not found for crtc:%d\n",
1732				drm_enc->crtc->index);
1733		return;
1734	}
1735
1736	del_timer(&dpu_enc->vsync_event_timer);
1737}
1738
1739static void dpu_encoder_vsync_event_work_handler(struct kthread_work *work)
1740{
1741	struct dpu_encoder_virt *dpu_enc = container_of(work,
1742			struct dpu_encoder_virt, vsync_event_work);
1743	ktime_t wakeup_time;
1744
1745	if (!dpu_enc) {
1746		DPU_ERROR("invalid dpu encoder\n");
1747		return;
1748	}
1749
1750	if (dpu_encoder_vsync_time(&dpu_enc->base, &wakeup_time))
1751		return;
1752
1753	trace_dpu_enc_vsync_event_work(DRMID(&dpu_enc->base), wakeup_time);
1754	mod_timer(&dpu_enc->vsync_event_timer,
1755			nsecs_to_jiffies(ktime_to_ns(wakeup_time)));
1756}
1757
1758void dpu_encoder_prepare_for_kickoff(struct drm_encoder *drm_enc)
1759{
1760	struct dpu_encoder_virt *dpu_enc;
1761	struct dpu_encoder_phys *phys;
1762	bool needs_hw_reset = false;
1763	unsigned int i;
1764
1765	dpu_enc = to_dpu_encoder_virt(drm_enc);
1766
1767	trace_dpu_enc_prepare_kickoff(DRMID(drm_enc));
1768
1769	/* prepare for next kickoff, may include waiting on previous kickoff */
1770	DPU_ATRACE_BEGIN("enc_prepare_for_kickoff");
1771	for (i = 0; i < dpu_enc->num_phys_encs; i++) {
1772		phys = dpu_enc->phys_encs[i];
1773		if (phys->ops.prepare_for_kickoff)
1774			phys->ops.prepare_for_kickoff(phys);
1775		if (phys->enable_state == DPU_ENC_ERR_NEEDS_HW_RESET)
1776			needs_hw_reset = true;
1777	}
1778	DPU_ATRACE_END("enc_prepare_for_kickoff");
1779
1780	dpu_encoder_resource_control(drm_enc, DPU_ENC_RC_EVENT_KICKOFF);
1781
1782	/* if any phys needs reset, reset all phys, in-order */
1783	if (needs_hw_reset) {
1784		trace_dpu_enc_prepare_kickoff_reset(DRMID(drm_enc));
1785		for (i = 0; i < dpu_enc->num_phys_encs; i++) {
1786			dpu_encoder_helper_hw_reset(dpu_enc->phys_encs[i]);
1787		}
1788	}
1789}
1790
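/*
 * Trigger the flip for all physical encoders: arm the frame-done watchdog
 * for DPU_ENCODER_FRAME_DONE_TIMEOUT_FRAMES frames (5 * 1000 / 60 ~= 83 ms
 * at 60 Hz), kick off the phys encoders, run their post-kickoff hooks, and
 * for DSI encoders re-arm the vsync event timer for the next estimated
 * vsync.
 */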
1791void dpu_encoder_kickoff(struct drm_encoder *drm_enc)
1792{
1793	struct dpu_encoder_virt *dpu_enc;
1794	struct dpu_encoder_phys *phys;
1795	ktime_t wakeup_time;
1796	unsigned long timeout_ms;
1797	unsigned int i;
1798
1799	DPU_ATRACE_BEGIN("encoder_kickoff");
1800	dpu_enc = to_dpu_encoder_virt(drm_enc);
1801
1802	trace_dpu_enc_kickoff(DRMID(drm_enc));
1803
1804	timeout_ms = DPU_ENCODER_FRAME_DONE_TIMEOUT_FRAMES * 1000 /
1805			drm_mode_vrefresh(&drm_enc->crtc->state->adjusted_mode);
1806
1807	atomic_set(&dpu_enc->frame_done_timeout_ms, timeout_ms);
1808	mod_timer(&dpu_enc->frame_done_timer,
1809			jiffies + msecs_to_jiffies(timeout_ms));
1810
1811	/* All phys encs are ready to go, trigger the kickoff */
1812	_dpu_encoder_kickoff_phys(dpu_enc);
1813
1814	/* allow phys encs to handle any post-kickoff business */
1815	for (i = 0; i < dpu_enc->num_phys_encs; i++) {
1816		phys = dpu_enc->phys_encs[i];
1817		if (phys->ops.handle_post_kickoff)
1818			phys->ops.handle_post_kickoff(phys);
1819	}
1820
1821	if (dpu_enc->disp_info.intf_type == DRM_MODE_ENCODER_DSI &&
1822			!dpu_encoder_vsync_time(drm_enc, &wakeup_time)) {
1823		trace_dpu_enc_early_kickoff(DRMID(drm_enc),
1824					    ktime_to_ms(wakeup_time));
1825		mod_timer(&dpu_enc->vsync_event_timer,
1826				nsecs_to_jiffies(ktime_to_ns(wakeup_time)));
1827	}
1828
1829	DPU_ATRACE_END("encoder_kickoff");
1830}
1831
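/*
 * Give each physical encoder a chance to run its prepare_commit hook
 * before the atomic commit is applied.
 */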
1832void dpu_encoder_prepare_commit(struct drm_encoder *drm_enc)
1833{
1834	struct dpu_encoder_virt *dpu_enc;
1835	struct dpu_encoder_phys *phys;
1836	int i;
1837
1838	if (!drm_enc) {
1839		DPU_ERROR("invalid encoder\n");
1840		return;
1841	}
1842	dpu_enc = to_dpu_encoder_virt(drm_enc);
1843
1844	for (i = 0; i < dpu_enc->num_phys_encs; i++) {
1845		phys = dpu_enc->phys_encs[i];
1846		if (phys->ops.prepare_commit)
1847			phys->ops.prepare_commit(phys);
1848	}
1849}
1850
1851#ifdef CONFIG_DEBUG_FS
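/*
 * debugfs "status" file: one line per physical encoder showing its
 * interface index, vsync and underrun counters, and whether it runs in
 * video or command mode.
 */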
1852static int _dpu_encoder_status_show(struct seq_file *s, void *data)
1853{
1854	struct dpu_encoder_virt *dpu_enc = s->private;
1855	int i;
1856
1857	mutex_lock(&dpu_enc->enc_lock);
1858	for (i = 0; i < dpu_enc->num_phys_encs; i++) {
1859		struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];
1860
1861		seq_printf(s, "intf:%d    vsync:%8d     underrun:%8d    ",
1862				phys->intf_idx - INTF_0,
1863				atomic_read(&phys->vsync_cnt),
1864				atomic_read(&phys->underrun_cnt));
1865
1866		switch (phys->intf_mode) {
1867		case INTF_MODE_VIDEO:
1868			seq_puts(s, "mode: video\n");
1869			break;
1870		case INTF_MODE_CMD:
1871			seq_puts(s, "mode: command\n");
1872			break;
1873		default:
1874			seq_puts(s, "mode: ???\n");
1875			break;
1876		}
1877	}
1878	mutex_unlock(&dpu_enc->enc_lock);
1879
1880	return 0;
1881}
1882
1883static int _dpu_encoder_debugfs_status_open(struct inode *inode,
1884		struct file *file)
1885{
1886	return single_open(file, _dpu_encoder_status_show, inode->i_private);
1887}
1888
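/*
 * Create the per-encoder debugfs directory ("encoder<id>") containing the
 * "status" file above, then let each physical encoder register its own
 * entries via its late_register hook.
 */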
1889static int _dpu_encoder_init_debugfs(struct drm_encoder *drm_enc)
1890{
1891	struct dpu_encoder_virt *dpu_enc = to_dpu_encoder_virt(drm_enc);
1892	int i;
1893
1894	static const struct file_operations debugfs_status_fops = {
1895		.open =		_dpu_encoder_debugfs_status_open,
1896		.read =		seq_read,
1897		.llseek =	seq_lseek,
1898		.release =	single_release,
1899	};
1900
1901	char name[DPU_NAME_SIZE];
1902
1903	if (!drm_enc->dev) {
1904		DPU_ERROR("invalid encoder or kms\n");
1905		return -EINVAL;
1906	}
1907
1908	snprintf(name, DPU_NAME_SIZE, "encoder%u", drm_enc->base.id);
1909
1910	/* create overall sub-directory for the encoder */
1911	dpu_enc->debugfs_root = debugfs_create_dir(name,
1912			drm_enc->dev->primary->debugfs_root);
1913
1914	/* don't error check these */
1915	debugfs_create_file("status", 0600,
1916		dpu_enc->debugfs_root, dpu_enc, &debugfs_status_fops);
1917
1918	for (i = 0; i < dpu_enc->num_phys_encs; i++)
1919		if (dpu_enc->phys_encs[i]->ops.late_register)
1920			dpu_enc->phys_encs[i]->ops.late_register(
1921					dpu_enc->phys_encs[i],
1922					dpu_enc->debugfs_root);
1923
1924	return 0;
1925}
1926#else
1927static int _dpu_encoder_init_debugfs(struct drm_encoder *drm_enc)
1928{
1929	return 0;
1930}
1931#endif
1932
1933static int dpu_encoder_late_register(struct drm_encoder *encoder)
1934{
1935	return _dpu_encoder_init_debugfs(encoder);
1936}
1937
1938static void dpu_encoder_early_unregister(struct drm_encoder *encoder)
1939{
1940	struct dpu_encoder_virt *dpu_enc = to_dpu_encoder_virt(encoder);
1941
1942	debugfs_remove_recursive(dpu_enc->debugfs_root);
1943}
1944
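/*
 * Create the physical encoders backing one h-tile of the virtual encoder:
 * a video-mode and/or command-mode phys encoder depending on the display
 * capabilities.  The last encoder created becomes cur_master, or cur_slave
 * when this tile has the ENC_ROLE_SLAVE split role.
 */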
1945static int dpu_encoder_virt_add_phys_encs(
1946		u32 display_caps,
1947		struct dpu_encoder_virt *dpu_enc,
1948		struct dpu_enc_phys_init_params *params)
1949{
1950	struct dpu_encoder_phys *enc = NULL;
1951
1952	DPU_DEBUG_ENC(dpu_enc, "\n");
1953
1954	/*
1955	 * We may create up to NUM_PHYS_ENCODER_TYPES physical encoders in
1956	 * this function, so check the array capacity up-front.
1957	 */
1958	if (dpu_enc->num_phys_encs + NUM_PHYS_ENCODER_TYPES >=
1959			ARRAY_SIZE(dpu_enc->phys_encs)) {
1960		DPU_ERROR_ENC(dpu_enc, "too many physical encoders %d\n",
1961			  dpu_enc->num_phys_encs);
1962		return -EINVAL;
1963	}
1964
1965	if (display_caps & MSM_DISPLAY_CAP_VID_MODE) {
1966		enc = dpu_encoder_phys_vid_init(params);
1967
1968		if (IS_ERR_OR_NULL(enc)) {
1969			DPU_ERROR_ENC(dpu_enc, "failed to init vid enc: %ld\n",
1970				PTR_ERR(enc));
1971			return enc == NULL ? -EINVAL : PTR_ERR(enc);
1972		}
1973
1974		dpu_enc->phys_encs[dpu_enc->num_phys_encs] = enc;
1975		++dpu_enc->num_phys_encs;
1976	}
1977
1978	if (display_caps & MSM_DISPLAY_CAP_CMD_MODE) {
1979		enc = dpu_encoder_phys_cmd_init(params);
1980
1981		if (IS_ERR_OR_NULL(enc)) {
1982			DPU_ERROR_ENC(dpu_enc, "failed to init cmd enc: %ld\n",
1983				PTR_ERR(enc));
1984			return enc == NULL ? -EINVAL : PTR_ERR(enc);
1985		}
1986
1987		dpu_enc->phys_encs[dpu_enc->num_phys_encs] = enc;
1988		++dpu_enc->num_phys_encs;
1989	}
1990
1991	if (params->split_role == ENC_ROLE_SLAVE)
1992		dpu_enc->cur_slave = enc;
1993	else
1994		dpu_enc->cur_master = enc;
1995
1996	return 0;
1997}
1998
1999static const struct dpu_encoder_virt_ops dpu_encoder_parent_ops = {
2000	.handle_vblank_virt = dpu_encoder_vblank_callback,
2001	.handle_underrun_virt = dpu_encoder_underrun_callback,
2002	.handle_frame_done = dpu_encoder_frame_done_callback,
2003};
2004
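/*
 * Build the physical encoders for each h-tile described by disp_info:
 * assign master/slave/solo split roles, resolve the INTF index for the
 * tile's controller, and reset the vsync/underrun counters.  Only DSI
 * interfaces are accepted here.
 */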
2005static int dpu_encoder_setup_display(struct dpu_encoder_virt *dpu_enc,
2006				 struct dpu_kms *dpu_kms,
2007				 struct msm_display_info *disp_info)
2008{
2009	int ret = 0;
2010	int i = 0;
2011	enum dpu_intf_type intf_type;
2012	struct dpu_enc_phys_init_params phys_params;
2013
2014	if (!dpu_enc) {
2015		DPU_ERROR("invalid arg(s), enc %d\n", dpu_enc != NULL);
2016		return -EINVAL;
2017	}
2018
2019	dpu_enc->cur_master = NULL;
2020
2021	memset(&phys_params, 0, sizeof(phys_params));
2022	phys_params.dpu_kms = dpu_kms;
2023	phys_params.parent = &dpu_enc->base;
2024	phys_params.parent_ops = &dpu_encoder_parent_ops;
2025	phys_params.enc_spinlock = &dpu_enc->enc_spinlock;
2026
2027	DPU_DEBUG("\n");
2028
2029	switch (disp_info->intf_type) {
2030	case DRM_MODE_ENCODER_DSI:
2031		intf_type = INTF_DSI;
2032		break;
2033	default:
2034		DPU_ERROR_ENC(dpu_enc, "unsupported display interface type\n");
2035		return -EINVAL;
2036	}
2037
2038	WARN_ON(disp_info->num_of_h_tiles < 1);
2039
2040	DPU_DEBUG("disp_info->num_of_h_tiles %d\n", disp_info->num_of_h_tiles);
2041
2042	if ((disp_info->capabilities & MSM_DISPLAY_CAP_CMD_MODE) ||
2043	    (disp_info->capabilities & MSM_DISPLAY_CAP_VID_MODE))
2044		dpu_enc->idle_pc_supported =
2045				dpu_kms->catalog->caps->has_idle_pc;
2046
2047	mutex_lock(&dpu_enc->enc_lock);
2048	for (i = 0; i < disp_info->num_of_h_tiles && !ret; i++) {
2049		/*
2050		 * Left-most tile is at index 0, content is controller id
2051		 * h_tile_instance_ids[2] = {0, 1}; DSI0 = left, DSI1 = right
2052		 * h_tile_instance_ids[2] = {1, 0}; DSI1 = left, DSI0 = right
2053		 */
2054		u32 controller_id = disp_info->h_tile_instance[i];
2055
2056		if (disp_info->num_of_h_tiles > 1) {
2057			if (i == 0)
2058				phys_params.split_role = ENC_ROLE_MASTER;
2059			else
2060				phys_params.split_role = ENC_ROLE_SLAVE;
2061		} else {
2062			phys_params.split_role = ENC_ROLE_SOLO;
2063		}
2064
2065		DPU_DEBUG("h_tile_instance %d = %d, split_role %d\n",
2066				i, controller_id, phys_params.split_role);
2067
2068		phys_params.intf_idx = dpu_encoder_get_intf(dpu_kms->catalog,
2069													intf_type,
2070													controller_id);
2071		if (phys_params.intf_idx == INTF_MAX) {
2072			DPU_ERROR_ENC(dpu_enc, "could not get intf: type %d, id %d\n",
2073						  intf_type, controller_id);
2074			ret = -EINVAL;
2075		}
2076
2077		if (!ret) {
2078			ret = dpu_encoder_virt_add_phys_encs(disp_info->capabilities,
2079												 dpu_enc,
2080												 &phys_params);
2081			if (ret)
2082				DPU_ERROR_ENC(dpu_enc, "failed to add phys encs\n");
2083		}
2084	}
2085
2086	for (i = 0; i < dpu_enc->num_phys_encs; i++) {
2087		struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];
2088		atomic_set(&phys->vsync_cnt, 0);
2089		atomic_set(&phys->underrun_cnt, 0);
2090	}
2091	mutex_unlock(&dpu_enc->enc_lock);
2092
2093	return ret;
2094}
2095
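/*
 * frame_done_timer callback: if a frame is still marked busy when the
 * timeout armed in dpu_encoder_kickoff() expires, report
 * DPU_ENCODER_FRAME_EVENT_ERROR through the CRTC frame-event callback.
 */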
2096static void dpu_encoder_frame_done_timeout(struct timer_list *t)
2097{
2098	struct dpu_encoder_virt *dpu_enc = from_timer(dpu_enc, t,
2099			frame_done_timer);
2100	struct drm_encoder *drm_enc = &dpu_enc->base;
2101	u32 event;
2102
2103	if (!drm_enc->dev) {
2104		DPU_ERROR("invalid parameters\n");
2105		return;
2106	}
2107
2108	if (!dpu_enc->frame_busy_mask[0] || !dpu_enc->crtc_frame_event_cb) {
2109		DRM_DEBUG_KMS("id:%u invalid timeout frame_busy_mask=%lu\n",
2110			      DRMID(drm_enc), dpu_enc->frame_busy_mask[0]);
2111		return;
2112	} else if (!atomic_xchg(&dpu_enc->frame_done_timeout_ms, 0)) {
2113		DRM_DEBUG_KMS("id:%u invalid timeout\n", DRMID(drm_enc));
2114		return;
2115	}
2116
2117	DPU_ERROR_ENC(dpu_enc, "frame done timeout\n");
2118
2119	event = DPU_ENCODER_FRAME_EVENT_ERROR;
2120	trace_dpu_enc_frame_done_timeout(DRMID(drm_enc), event);
2121	dpu_enc->crtc_frame_event_cb(dpu_enc->crtc_frame_event_cb_data, event);
2122}
2123
2124static const struct drm_encoder_helper_funcs dpu_encoder_helper_funcs = {
2125	.mode_set = dpu_encoder_virt_mode_set,
2126	.disable = dpu_encoder_virt_disable,
2127	.enable = dpu_kms_encoder_enable,
2128	.atomic_check = dpu_encoder_virt_atomic_check,
2129
2130	/* This is called by dpu_kms_encoder_enable */
2131	.commit = dpu_encoder_virt_enable,
2132};
2133
2134static const struct drm_encoder_funcs dpu_encoder_funcs = {
2135		.destroy = dpu_encoder_destroy,
2136		.late_register = dpu_encoder_late_register,
2137		.early_unregister = dpu_encoder_early_unregister,
2138};
2139
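/*
 * Finish initializing an encoder created by dpu_encoder_init(): build the
 * physical encoders from disp_info and set up the frame-done timer, the
 * DSI vsync timer/work, and the delayed "off" work used for idle power
 * collapse.
 */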
2140int dpu_encoder_setup(struct drm_device *dev, struct drm_encoder *enc,
2141		struct msm_display_info *disp_info)
2142{
2143	struct msm_drm_private *priv = dev->dev_private;
2144	struct dpu_kms *dpu_kms = to_dpu_kms(priv->kms);
2145	struct drm_encoder *drm_enc = NULL;
2146	struct dpu_encoder_virt *dpu_enc = NULL;
2147	int ret = 0;
2148
2149	dpu_enc = to_dpu_encoder_virt(enc);
2150
2151	ret = dpu_encoder_setup_display(dpu_enc, dpu_kms, disp_info);
2152	if (ret)
2153		goto fail;
2154
2155	atomic_set(&dpu_enc->frame_done_timeout_ms, 0);
2156	timer_setup(&dpu_enc->frame_done_timer,
2157			dpu_encoder_frame_done_timeout, 0);
2158
2159	if (disp_info->intf_type == DRM_MODE_ENCODER_DSI)
2160		timer_setup(&dpu_enc->vsync_event_timer,
2161				dpu_encoder_vsync_event_handler,
2162				0);
2163
2165	INIT_DELAYED_WORK(&dpu_enc->delayed_off_work,
2166			dpu_encoder_off_work);
2167	dpu_enc->idle_timeout = IDLE_TIMEOUT;
2168
2169	kthread_init_work(&dpu_enc->vsync_event_work,
2170			dpu_encoder_vsync_event_work_handler);
2171
2172	memcpy(&dpu_enc->disp_info, disp_info, sizeof(*disp_info));
2173
2174	DPU_DEBUG_ENC(dpu_enc, "created\n");
2175
2176	return ret;
2177
2178fail:
2179	DPU_ERROR("failed to create encoder\n");
2180	if (drm_enc)
2181		dpu_encoder_destroy(drm_enc);
2182
2183	return ret;
2186}
2187
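/*
 * Allocate the virtual DPU encoder, register it with the DRM core and
 * initialize its locks; the physical encoders are added later by
 * dpu_encoder_setup().
 */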
2188struct drm_encoder *dpu_encoder_init(struct drm_device *dev,
2189		int drm_enc_mode)
2190{
2191	struct dpu_encoder_virt *dpu_enc = NULL;
2192	int rc = 0;
2193
2194	dpu_enc = devm_kzalloc(dev->dev, sizeof(*dpu_enc), GFP_KERNEL);
2195	if (!dpu_enc)
2196		return ERR_PTR(-ENOMEM);
2197
2198	rc = drm_encoder_init(dev, &dpu_enc->base, &dpu_encoder_funcs,
2199			drm_enc_mode, NULL);
2200	if (rc) {
2201		devm_kfree(dev->dev, dpu_enc);
2202		return ERR_PTR(rc);
2203	}
2204
2205	drm_encoder_helper_add(&dpu_enc->base, &dpu_encoder_helper_funcs);
2206
2207	spin_lock_init(&dpu_enc->enc_spinlock);
2208	dpu_enc->enabled = false;
2209	mutex_init(&dpu_enc->enc_lock);
2210	mutex_init(&dpu_enc->rc_lock);
2211
2212	return &dpu_enc->base;
2213}
2214
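/*
 * Block until each physical encoder reports the requested event
 * (commit done, tx complete or vblank), returning the first error seen.
 */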
2215int dpu_encoder_wait_for_event(struct drm_encoder *drm_enc,
2216	enum msm_event_wait event)
2217{
2218	int (*fn_wait)(struct dpu_encoder_phys *phys_enc) = NULL;
2219	struct dpu_encoder_virt *dpu_enc = NULL;
2220	int i, ret = 0;
2221
2222	if (!drm_enc) {
2223		DPU_ERROR("invalid encoder\n");
2224		return -EINVAL;
2225	}
2226	dpu_enc = to_dpu_encoder_virt(drm_enc);
2227	DPU_DEBUG_ENC(dpu_enc, "\n");
2228
2229	for (i = 0; i < dpu_enc->num_phys_encs; i++) {
2230		struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];
2231
2232		switch (event) {
2233		case MSM_ENC_COMMIT_DONE:
2234			fn_wait = phys->ops.wait_for_commit_done;
2235			break;
2236		case MSM_ENC_TX_COMPLETE:
2237			fn_wait = phys->ops.wait_for_tx_complete;
2238			break;
2239		case MSM_ENC_VBLANK:
2240			fn_wait = phys->ops.wait_for_vblank;
2241			break;
2242		default:
2243			DPU_ERROR_ENC(dpu_enc, "unknown wait event %d\n",
2244					event);
2245			return -EINVAL;
2246		}
2247
2248		if (fn_wait) {
2249			DPU_ATRACE_BEGIN("wait_for_completion_event");
2250			ret = fn_wait(phys);
2251			DPU_ATRACE_END("wait_for_completion_event");
2252			if (ret)
2253				return ret;
2254		}
2255	}
2256
2257	return ret;
2258}
2259
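/*
 * Report the interface mode (video or command) of the current master phys
 * encoder, falling back to the first phys encoder, or INTF_MODE_NONE.
 */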
2260enum dpu_intf_mode dpu_encoder_get_intf_mode(struct drm_encoder *encoder)
2261{
2262	struct dpu_encoder_virt *dpu_enc = NULL;
2263
2264	if (!encoder) {
2265		DPU_ERROR("invalid encoder\n");
2266		return INTF_MODE_NONE;
2267	}
2268	dpu_enc = to_dpu_encoder_virt(encoder);
2269
2270	if (dpu_enc->cur_master)
2271		return dpu_enc->cur_master->intf_mode;
2272
2273	if (dpu_enc->num_phys_encs)
2274		return dpu_enc->phys_encs[0]->intf_mode;
2275
2276	return INTF_MODE_NONE;
2277}