// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2014-2018 The Linux Foundation. All rights reserved.
 * Copyright (C) 2013 Red Hat
 * Author: Rob Clark <robdclark@gmail.com>
 */

#define pr_fmt(fmt)	"[drm:%s:%d] " fmt, __func__, __LINE__
#include <linux/sort.h>
#include <linux/debugfs.h>
#include <linux/ktime.h>
#include <linux/bits.h>

#include <drm/drm_crtc.h>
#include <drm/drm_flip_work.h>
#include <drm/drm_mode.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_rect.h>
#include <drm/drm_vblank.h>

#include "dpu_kms.h"
#include "dpu_hw_lm.h"
#include "dpu_hw_ctl.h"
#include "dpu_hw_dspp.h"
#include "dpu_crtc.h"
#include "dpu_plane.h"
#include "dpu_encoder.h"
#include "dpu_vbif.h"
#include "dpu_core_perf.h"
#include "dpu_trace.h"

#define DPU_DRM_BLEND_OP_NOT_DEFINED    0
#define DPU_DRM_BLEND_OP_OPAQUE         1
#define DPU_DRM_BLEND_OP_PREMULTIPLIED  2
#define DPU_DRM_BLEND_OP_COVERAGE       3
#define DPU_DRM_BLEND_OP_MAX            4

/* layer mixer index on dpu_crtc */
#define LEFT_MIXER 0
#define RIGHT_MIXER 1

/* timeout in ms waiting for frame done */
#define DPU_CRTC_FRAME_DONE_TIMEOUT_MS	60

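/*
 * drm_color_ctm coefficients are sign-magnitude s31.32 fixed point.
 * Judging by the macro name, the hardware expects an unsigned 3.15
 * value: clear the sign bit, then keep the 3 integer and 15 fractional
 * bits (original bits 34..17).
 */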
#define	CONVERT_S3_15(val) \
	(((((u64)val) & ~BIT_ULL(63)) >> 17) & GENMASK_ULL(17, 0))

static struct dpu_kms *_dpu_crtc_get_kms(struct drm_crtc *crtc)
{
	struct msm_drm_private *priv = crtc->dev->dev_private;

	return to_dpu_kms(priv->kms);
}

static void dpu_crtc_destroy(struct drm_crtc *crtc)
{
	struct dpu_crtc *dpu_crtc;

	DPU_DEBUG("\n");

	if (!crtc)
		return;

	dpu_crtc = to_dpu_crtc(crtc);
	drm_crtc_cleanup(crtc);
	kfree(dpu_crtc);
}

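/*
 * Program one layer mixer's blend configuration for a plane: constant
 * full-alpha "opaque" blending by default, switching to per-pixel
 * coverage blending when the format carries an alpha channel.
 */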
static void _dpu_crtc_setup_blend_cfg(struct dpu_crtc_mixer *mixer,
		struct dpu_plane_state *pstate, struct dpu_format *format)
{
	struct dpu_hw_mixer *lm = mixer->hw_lm;
	uint32_t blend_op;
	struct drm_format_name_buf format_name;

	/* default to opaque blending */
	blend_op = DPU_BLEND_FG_ALPHA_FG_CONST |
		DPU_BLEND_BG_ALPHA_BG_CONST;

	if (format->alpha_enable) {
		/* coverage blending */
		blend_op = DPU_BLEND_FG_ALPHA_FG_PIXEL |
			DPU_BLEND_BG_ALPHA_FG_PIXEL |
			DPU_BLEND_BG_INV_ALPHA;
	}

	lm->ops.setup_blend_config(lm, pstate->stage,
				0xFF, 0, blend_op);

	DPU_DEBUG("format:%s, alpha_en:%u blend_op:0x%x\n",
		drm_get_format_name(format->base.pixel_format, &format_name),
		format->alpha_enable, blend_op);
}

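/*
 * Program the output rectangle of every visible layer mixer from the
 * per-mixer bounds computed in _dpu_crtc_setup_lm_bounds().
 */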
static void _dpu_crtc_program_lm_output_roi(struct drm_crtc *crtc)
{
	struct dpu_crtc_state *crtc_state;
	int lm_idx, lm_horiz_position;

	crtc_state = to_dpu_crtc_state(crtc->state);

	lm_horiz_position = 0;
	for (lm_idx = 0; lm_idx < crtc_state->num_mixers; lm_idx++) {
		const struct drm_rect *lm_roi = &crtc_state->lm_bounds[lm_idx];
		struct dpu_hw_mixer *hw_lm = crtc_state->mixers[lm_idx].hw_lm;
		struct dpu_hw_mixer_cfg cfg;

		if (!lm_roi || !drm_rect_visible(lm_roi))
			continue;

		cfg.out_width = drm_rect_width(lm_roi);
		cfg.out_height = drm_rect_height(lm_roi);
		cfg.right_mixer = lm_horiz_position++;
		cfg.flags = 0;
		hw_lm->ops.setup_mixer_out(hw_lm, &cfg);
	}
}

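/*
 * Walk all planes staged on the crtc, fill the mixer stage configuration
 * (pipe and multirect index per blend stage), set up per-mixer blending
 * and accumulate the CTL flush mask for each layer mixer.
 */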
static void _dpu_crtc_blend_setup_mixer(struct drm_crtc *crtc,
	struct dpu_crtc *dpu_crtc, struct dpu_crtc_mixer *mixer)
{
	struct drm_plane *plane;
	struct drm_framebuffer *fb;
	struct drm_plane_state *state;
	struct dpu_crtc_state *cstate = to_dpu_crtc_state(crtc->state);
	struct dpu_plane_state *pstate = NULL;
	struct dpu_format *format;
	struct dpu_hw_ctl *ctl = mixer->lm_ctl;
	struct dpu_hw_stage_cfg *stage_cfg = &dpu_crtc->stage_cfg;

	u32 flush_mask;
	uint32_t stage_idx, lm_idx;
	int zpos_cnt[DPU_STAGE_MAX + 1] = { 0 };
	bool bg_alpha_enable = false;

	drm_atomic_crtc_for_each_plane(plane, crtc) {
		state = plane->state;
		if (!state)
			continue;

		pstate = to_dpu_plane_state(state);
		fb = state->fb;

		dpu_plane_get_ctl_flush(plane, ctl, &flush_mask);

		DPU_DEBUG("crtc %d stage:%d - plane %d sspp %d fb %d\n",
				crtc->base.id,
				pstate->stage,
				plane->base.id,
				dpu_plane_pipe(plane) - SSPP_VIG0,
				state->fb ? state->fb->base.id : -1);

		format = to_dpu_format(msm_framebuffer_format(pstate->base.fb));

		if (pstate->stage == DPU_STAGE_BASE && format->alpha_enable)
			bg_alpha_enable = true;

		stage_idx = zpos_cnt[pstate->stage]++;
		stage_cfg->stage[pstate->stage][stage_idx] =
					dpu_plane_pipe(plane);
		stage_cfg->multirect_index[pstate->stage][stage_idx] =
					pstate->multirect_index;

		trace_dpu_crtc_setup_mixer(DRMID(crtc), DRMID(plane),
					   state, pstate, stage_idx,
					   dpu_plane_pipe(plane) - SSPP_VIG0,
					   format->base.pixel_format,
					   fb ? fb->modifier : 0);

		/* blend config update */
		for (lm_idx = 0; lm_idx < cstate->num_mixers; lm_idx++) {
			_dpu_crtc_setup_blend_cfg(mixer + lm_idx,
						pstate, format);

			mixer[lm_idx].flush_mask |= flush_mask;

			if (bg_alpha_enable && !format->alpha_enable)
				mixer[lm_idx].mixer_op_mode = 0;
			else
				mixer[lm_idx].mixer_op_mode |=
						1 << pstate->stage;
		}
	}

	_dpu_crtc_program_lm_output_roi(crtc);
}

/**
 * _dpu_crtc_blend_setup - configure crtc mixers
 * @crtc: Pointer to drm crtc structure
 */
static void _dpu_crtc_blend_setup(struct drm_crtc *crtc)
{
	struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);
	struct dpu_crtc_state *cstate = to_dpu_crtc_state(crtc->state);
	struct dpu_crtc_mixer *mixer = cstate->mixers;
	struct dpu_hw_ctl *ctl;
	struct dpu_hw_mixer *lm;
	int i;

	DPU_DEBUG("%s\n", dpu_crtc->name);

	for (i = 0; i < cstate->num_mixers; i++) {
		mixer[i].mixer_op_mode = 0;
		mixer[i].flush_mask = 0;
		if (mixer[i].lm_ctl->ops.clear_all_blendstages)
			mixer[i].lm_ctl->ops.clear_all_blendstages(
					mixer[i].lm_ctl);
	}

	/* initialize stage cfg */
	memset(&dpu_crtc->stage_cfg, 0, sizeof(struct dpu_hw_stage_cfg));

	_dpu_crtc_blend_setup_mixer(crtc, dpu_crtc, mixer);

	for (i = 0; i < cstate->num_mixers; i++) {
		ctl = mixer[i].lm_ctl;
		lm = mixer[i].hw_lm;

		lm->ops.setup_alpha_out(lm, mixer[i].mixer_op_mode);

		mixer[i].flush_mask |= ctl->ops.get_bitmask_mixer(ctl,
			mixer[i].hw_lm->idx);

		/* stage config flush mask */
		ctl->ops.update_pending_flush(ctl, mixer[i].flush_mask);

		DPU_DEBUG("lm %d, op_mode 0x%X, ctl %d, flush mask 0x%x\n",
			mixer[i].hw_lm->idx - LM_0,
			mixer[i].mixer_op_mode,
			ctl->idx - CTL_0,
			mixer[i].flush_mask);

		ctl->ops.setup_blendstage(ctl, mixer[i].hw_lm->idx,
			&dpu_crtc->stage_cfg);
	}
}

/**
 * _dpu_crtc_complete_flip - signal pending page_flip events
 * @crtc: Pointer to drm crtc structure
 *
 * Any pending vblank events are added to the vblank_event_list
 * so that the next vblank interrupt shall signal them.
 * However PAGE_FLIP events are not handled through the vblank_event_list.
 * This API signals any pending PAGE_FLIP events requested through
 * DRM_IOCTL_MODE_PAGE_FLIP that are cached in dpu_crtc->event.
 */
static void _dpu_crtc_complete_flip(struct drm_crtc *crtc)
{
	struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);
	struct drm_device *dev = crtc->dev;
	unsigned long flags;

	spin_lock_irqsave(&dev->event_lock, flags);
	if (dpu_crtc->event) {
		DRM_DEBUG_VBL("%s: send event: %pK\n", dpu_crtc->name,
			      dpu_crtc->event);
		trace_dpu_crtc_complete_flip(DRMID(crtc));
		drm_crtc_send_vblank_event(crtc, dpu_crtc->event);
		dpu_crtc->event = NULL;
	}
	spin_unlock_irqrestore(&dev->event_lock, flags);
}

enum dpu_intf_mode dpu_crtc_get_intf_mode(struct drm_crtc *crtc)
{
	struct drm_encoder *encoder;

	if (!crtc) {
		DPU_ERROR("invalid crtc\n");
		return INTF_MODE_NONE;
	}

	/*
	 * TODO: This function is called from dpu debugfs and as part of atomic
	 * check. When called from debugfs, the crtc->mutex must be held to
	 * read crtc->state. However reading crtc->state from atomic check isn't
	 * allowed (unless you have a good reason, a big comment, and a deep
	 * understanding of how the atomic/modeset locks work (<- and this is
	 * probably not possible)). So we'll keep the WARN_ON here for now, but
	 * really we need to figure out a better way to track our operating mode
	 */
	WARN_ON(!drm_modeset_is_locked(&crtc->mutex));

	/* TODO: Returns the first INTF_MODE, could there be multiple values? */
	drm_for_each_encoder_mask(encoder, crtc->dev, crtc->state->encoder_mask)
		return dpu_encoder_get_intf_mode(encoder);

	return INTF_MODE_NONE;
}

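/*
 * Vblank callback, invoked by the encoder on each vblank interrupt:
 * update the vblank statistics, signal any pending page flip and
 * forward the event to the DRM core.
 */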
void dpu_crtc_vblank_callback(struct drm_crtc *crtc)
{
	struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);

	/* keep statistics on vblank callback - with auto reset via debugfs */
	if (ktime_compare(dpu_crtc->vblank_cb_time, ktime_set(0, 0)) == 0)
		dpu_crtc->vblank_cb_time = ktime_get();
	else
		dpu_crtc->vblank_cb_count++;
	_dpu_crtc_complete_flip(crtc);
	drm_crtc_handle_vblank(crtc);
	trace_dpu_crtc_vblank_cb(DRMID(crtc));
}

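/*
 * Deferred handler for frame events, run on the per-crtc event kthread:
 * drops the frame_pending count, releases bandwidth once the last frame
 * is retired, completes frame_done_comp and returns the event container
 * to the free list.
 */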
static void dpu_crtc_frame_event_work(struct kthread_work *work)
{
	struct dpu_crtc_frame_event *fevent = container_of(work,
			struct dpu_crtc_frame_event, work);
	struct drm_crtc *crtc = fevent->crtc;
	struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);
	unsigned long flags;
	bool frame_done = false;

	DPU_ATRACE_BEGIN("crtc_frame_event");

	DRM_DEBUG_KMS("crtc%d event:%u ts:%lld\n", crtc->base.id, fevent->event,
			ktime_to_ns(fevent->ts));

	if (fevent->event & (DPU_ENCODER_FRAME_EVENT_DONE
				| DPU_ENCODER_FRAME_EVENT_ERROR
				| DPU_ENCODER_FRAME_EVENT_PANEL_DEAD)) {

		if (atomic_read(&dpu_crtc->frame_pending) < 1) {
			/* ignore frame events when no frame is pending */
		} else if (atomic_dec_return(&dpu_crtc->frame_pending) == 0) {
			/* release bandwidth and other resources */
			trace_dpu_crtc_frame_event_done(DRMID(crtc),
							fevent->event);
			dpu_core_perf_crtc_release_bw(crtc);
		} else {
			trace_dpu_crtc_frame_event_more_pending(DRMID(crtc),
								fevent->event);
		}

		if (fevent->event & DPU_ENCODER_FRAME_EVENT_DONE)
			dpu_core_perf_crtc_update(crtc, 0, false);

		if (fevent->event & (DPU_ENCODER_FRAME_EVENT_DONE
					| DPU_ENCODER_FRAME_EVENT_ERROR))
			frame_done = true;
	}

	if (fevent->event & DPU_ENCODER_FRAME_EVENT_PANEL_DEAD)
		DPU_ERROR("crtc%d ts:%lld received panel dead event\n",
				crtc->base.id, ktime_to_ns(fevent->ts));

	if (frame_done)
		complete_all(&dpu_crtc->frame_done_comp);

	spin_lock_irqsave(&dpu_crtc->spin_lock, flags);
	list_add_tail(&fevent->list, &dpu_crtc->frame_event_list);
	spin_unlock_irqrestore(&dpu_crtc->spin_lock, flags);
	DPU_ATRACE_END("crtc_frame_event");
}

/*
 * dpu_crtc_frame_event_cb - crtc frame event callback API. The CRTC module
 * registers this callback with the encoder for all frame event callbacks
 * like frame_error, frame_done, idle_timeout, etc. The encoder may invoke
 * these events from different contexts - IRQ, user thread, commit_thread,
 * etc. Each event should be carefully reviewed and should be processed in
 * the proper task context to avoid scheduling delay or to properly manage
 * the IRQ context's bottom-half processing.
 */
static void dpu_crtc_frame_event_cb(void *data, u32 event)
{
	struct drm_crtc *crtc = (struct drm_crtc *)data;
	struct dpu_crtc *dpu_crtc;
	struct msm_drm_private *priv;
	struct dpu_crtc_frame_event *fevent;
	unsigned long flags;
	u32 crtc_id;

	/* Nothing to do on idle event */
	if (event & DPU_ENCODER_FRAME_EVENT_IDLE)
		return;

	dpu_crtc = to_dpu_crtc(crtc);
	priv = crtc->dev->dev_private;
	crtc_id = drm_crtc_index(crtc);

	trace_dpu_crtc_frame_event_cb(DRMID(crtc), event);

	spin_lock_irqsave(&dpu_crtc->spin_lock, flags);
	fevent = list_first_entry_or_null(&dpu_crtc->frame_event_list,
			struct dpu_crtc_frame_event, list);
	if (fevent)
		list_del_init(&fevent->list);
	spin_unlock_irqrestore(&dpu_crtc->spin_lock, flags);

	if (!fevent) {
		DRM_ERROR_RATELIMITED("crtc%d event %d overflow\n", crtc->base.id, event);
		return;
	}

	fevent->event = event;
	fevent->crtc = crtc;
	fevent->ts = ktime_get();
	kthread_queue_work(priv->event_thread[crtc_id].worker, &fevent->work);
}

void dpu_crtc_complete_commit(struct drm_crtc *crtc)
{
	trace_dpu_crtc_complete_commit(DRMID(crtc));
}

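/*
 * Split the mode's active area evenly across the assigned layer mixers;
 * each mixer gets a vertical strip of hdisplay / num_mixers pixels.
 */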
static void _dpu_crtc_setup_lm_bounds(struct drm_crtc *crtc,
		struct drm_crtc_state *state)
{
	struct dpu_crtc_state *cstate = to_dpu_crtc_state(state);
	struct drm_display_mode *adj_mode = &state->adjusted_mode;
	u32 crtc_split_width = adj_mode->hdisplay / cstate->num_mixers;
	int i;

	for (i = 0; i < cstate->num_mixers; i++) {
		struct drm_rect *r = &cstate->lm_bounds[i];
		r->x1 = crtc_split_width * i;
		r->y1 = 0;
		r->x2 = r->x1 + crtc_split_width;
		r->y2 = adj_mode->vdisplay;

		trace_dpu_crtc_setup_lm_bounds(DRMID(crtc), i, r);
	}

	drm_mode_debug_printmodeline(adj_mode);
}

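/*
 * Translate the 3x3 DRM CTM property (row-major drm_color_ctm) into the
 * per-channel coefficients of the DSPP color correction config.
 */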
static void _dpu_crtc_get_pcc_coeff(struct drm_crtc_state *state,
		struct dpu_hw_pcc_cfg *cfg)
{
	struct drm_color_ctm *ctm;

	memset(cfg, 0, sizeof(struct dpu_hw_pcc_cfg));

	ctm = (struct drm_color_ctm *)state->ctm->data;

	if (!ctm)
		return;

	cfg->r.r = CONVERT_S3_15(ctm->matrix[0]);
	cfg->g.r = CONVERT_S3_15(ctm->matrix[1]);
	cfg->b.r = CONVERT_S3_15(ctm->matrix[2]);

	cfg->r.g = CONVERT_S3_15(ctm->matrix[3]);
	cfg->g.g = CONVERT_S3_15(ctm->matrix[4]);
	cfg->b.g = CONVERT_S3_15(ctm->matrix[5]);

	cfg->r.b = CONVERT_S3_15(ctm->matrix[6]);
	cfg->g.b = CONVERT_S3_15(ctm->matrix[7]);
	cfg->b.b = CONVERT_S3_15(ctm->matrix[8]);
}

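/*
 * Program the color processing blocks (currently just DSPP PCC) when the
 * color management state changed; a NULL CTM disables the PCC block.
 */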
static void _dpu_crtc_setup_cp_blocks(struct drm_crtc *crtc)
{
	struct drm_crtc_state *state = crtc->state;
	struct dpu_crtc_state *cstate = to_dpu_crtc_state(crtc->state);
	struct dpu_crtc_mixer *mixer = cstate->mixers;
	struct dpu_hw_pcc_cfg cfg;
	struct dpu_hw_ctl *ctl;
	struct dpu_hw_mixer *lm;
	struct dpu_hw_dspp *dspp;
	int i;

	if (!state->color_mgmt_changed)
		return;

	for (i = 0; i < cstate->num_mixers; i++) {
		ctl = mixer[i].lm_ctl;
		lm = mixer[i].hw_lm;
		dspp = mixer[i].hw_dspp;

		if (!dspp || !dspp->ops.setup_pcc)
			continue;

		if (!state->ctm) {
			dspp->ops.setup_pcc(dspp, NULL);
		} else {
			_dpu_crtc_get_pcc_coeff(state, &cfg);
			dspp->ops.setup_pcc(dspp, &cfg);
		}

		mixer[i].flush_mask |= ctl->ops.get_bitmask_dspp(ctl,
			mixer[i].hw_dspp->idx);

		/* stage config flush mask */
		ctl->ops.update_pending_flush(ctl, mixer[i].flush_mask);

		DPU_DEBUG("lm %d, ctl %d, flush mask 0x%x\n",
			mixer[i].hw_lm->idx - LM_0,
			ctl->idx - CTL_0,
			mixer[i].flush_mask);
	}
}

static void dpu_crtc_atomic_begin(struct drm_crtc *crtc,
		struct drm_crtc_state *old_state)
{
	struct dpu_crtc *dpu_crtc;
	struct dpu_crtc_state *cstate;
	struct drm_encoder *encoder;
	struct drm_device *dev;
	unsigned long flags;

	if (!crtc) {
		DPU_ERROR("invalid crtc\n");
		return;
	}

	if (!crtc->state->enable) {
		DPU_DEBUG("crtc%d -> enable %d, skip atomic_begin\n",
				crtc->base.id, crtc->state->enable);
		return;
	}

	DPU_DEBUG("crtc%d\n", crtc->base.id);

	dpu_crtc = to_dpu_crtc(crtc);
	cstate = to_dpu_crtc_state(crtc->state);
	dev = crtc->dev;

	_dpu_crtc_setup_lm_bounds(crtc, crtc->state);

	if (dpu_crtc->event) {
		WARN_ON(dpu_crtc->event);
	} else {
		spin_lock_irqsave(&dev->event_lock, flags);
		dpu_crtc->event = crtc->state->event;
		crtc->state->event = NULL;
		spin_unlock_irqrestore(&dev->event_lock, flags);
	}

	/* encoder will trigger pending mask now */
	drm_for_each_encoder_mask(encoder, crtc->dev, crtc->state->encoder_mask)
		dpu_encoder_trigger_kickoff_pending(encoder);

	/*
	 * If no mixers have been allocated in dpu_crtc_atomic_check(),
	 * it means we are trying to flush a CRTC whose state is disabled:
	 * nothing else needs to be done.
	 */
	if (unlikely(!cstate->num_mixers))
		return;

	_dpu_crtc_blend_setup(crtc);

	_dpu_crtc_setup_cp_blocks(crtc);

	/*
	 * PP_DONE irq is only used by command mode for now.
	 * It is better to request it as pending before the FLUSH and START
	 * triggers, to make sure no pp_done irq is missed.
	 * This is safe because no pp_done will happen before the SW trigger
	 * in command mode.
	 */
}

static void dpu_crtc_atomic_flush(struct drm_crtc *crtc,
		struct drm_crtc_state *old_crtc_state)
{
	struct dpu_crtc *dpu_crtc;
	struct drm_device *dev;
	struct drm_plane *plane;
	struct msm_drm_private *priv;
	unsigned long flags;
	struct dpu_crtc_state *cstate;

	if (!crtc->state->enable) {
		DPU_DEBUG("crtc%d -> enable %d, skip atomic_flush\n",
				crtc->base.id, crtc->state->enable);
		return;
	}

	DPU_DEBUG("crtc%d\n", crtc->base.id);

	dpu_crtc = to_dpu_crtc(crtc);
	cstate = to_dpu_crtc_state(crtc->state);
	dev = crtc->dev;
	priv = dev->dev_private;

	if (crtc->index >= ARRAY_SIZE(priv->event_thread)) {
		DPU_ERROR("invalid crtc index[%d]\n", crtc->index);
		return;
	}

	if (dpu_crtc->event) {
		DPU_DEBUG("already received dpu_crtc->event\n");
	} else {
		spin_lock_irqsave(&dev->event_lock, flags);
		dpu_crtc->event = crtc->state->event;
		crtc->state->event = NULL;
		spin_unlock_irqrestore(&dev->event_lock, flags);
	}

	/*
	 * If no mixers have been allocated in dpu_crtc_atomic_check(),
	 * it means we are trying to flush a CRTC whose state is disabled:
	 * nothing else needs to be done.
	 */
	if (unlikely(!cstate->num_mixers))
		return;

	/*
	 * For planes without commit update, drm framework will not add
	 * those planes to current state since hardware update is not
	 * required. However, if those planes were power collapsed since
	 * last commit cycle, driver has to restore the hardware state
	 * of those planes explicitly here prior to plane flush.
	 */
	drm_atomic_crtc_for_each_plane(plane, crtc)
		dpu_plane_restore(plane);

	/* update performance setting before crtc kickoff */
	dpu_core_perf_crtc_update(crtc, 1, false);

	/*
	 * Final plane updates: Give each plane a chance to complete all
	 *                      required writes/flushing before crtc's "flush
	 *                      everything" call below.
	 */
	drm_atomic_crtc_for_each_plane(plane, crtc) {
		if (dpu_crtc->smmu_state.transition_error)
			dpu_plane_set_error(plane, true);
		dpu_plane_flush(plane);
	}

	/* Kickoff will be scheduled by outer layer */
}

/**
 * dpu_crtc_destroy_state - state destroy hook
 * @crtc: drm CRTC
 * @state: CRTC state object to release
 */
static void dpu_crtc_destroy_state(struct drm_crtc *crtc,
		struct drm_crtc_state *state)
{
	struct dpu_crtc_state *cstate;

	if (!crtc || !state) {
		DPU_ERROR("invalid argument(s)\n");
		return;
	}

	cstate = to_dpu_crtc_state(state);

	DPU_DEBUG("crtc%d\n", crtc->base.id);

	__drm_atomic_helper_crtc_destroy_state(state);

	kfree(cstate);
}

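/*
 * Block until the encoder signals frame done for the last kickoff, or
 * until DPU_CRTC_FRAME_DONE_TIMEOUT_MS expires.
 */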
static int _dpu_crtc_wait_for_frame_done(struct drm_crtc *crtc)
{
	struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);
	int ret, rc = 0;

	if (!atomic_read(&dpu_crtc->frame_pending)) {
		DPU_DEBUG("no frames pending\n");
		return 0;
	}

	DPU_ATRACE_BEGIN("frame done completion wait");
	ret = wait_for_completion_timeout(&dpu_crtc->frame_done_comp,
			msecs_to_jiffies(DPU_CRTC_FRAME_DONE_TIMEOUT_MS));
	if (!ret) {
		DRM_ERROR("frame done wait timed out, ret:%d\n", ret);
		rc = -ETIMEDOUT;
	}
	DPU_ATRACE_END("frame done completion wait");

	return rc;
}

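/*
 * Kick off the current atomic commit: let every encoder prepare and then
 * trigger flush/start, bumping frame_pending so the frame-done handler
 * can release the acquired bandwidth later.
 */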
void dpu_crtc_commit_kickoff(struct drm_crtc *crtc)
{
	struct drm_encoder *encoder;
	struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);
	struct dpu_kms *dpu_kms = _dpu_crtc_get_kms(crtc);
	struct dpu_crtc_state *cstate = to_dpu_crtc_state(crtc->state);

	/*
	 * If no mixers have been allocated in dpu_crtc_atomic_check(),
	 * it means we are trying to start a CRTC whose state is disabled:
	 * nothing else needs to be done.
	 */
	if (unlikely(!cstate->num_mixers))
		return;

	DPU_ATRACE_BEGIN("crtc_commit");

	/*
	 * Encoder will flush/start now, unless it has a tx pending. If so, it
	 * may delay and flush at an irq event (e.g. ppdone)
	 */
	drm_for_each_encoder_mask(encoder, crtc->dev,
				  crtc->state->encoder_mask)
		dpu_encoder_prepare_for_kickoff(encoder);

	if (atomic_inc_return(&dpu_crtc->frame_pending) == 1) {
		/* acquire bandwidth and other resources */
		DPU_DEBUG("crtc%d first commit\n", crtc->base.id);
	} else {
		DPU_DEBUG("crtc%d commit\n", crtc->base.id);
	}

	dpu_crtc->play_count++;

	dpu_vbif_clear_errors(dpu_kms);

	drm_for_each_encoder_mask(encoder, crtc->dev, crtc->state->encoder_mask)
		dpu_encoder_kickoff(encoder);

	reinit_completion(&dpu_crtc->frame_done_comp);
	DPU_ATRACE_END("crtc_commit");
}

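/*
 * .reset hook: free any existing state and install a fresh, zeroed
 * dpu_crtc_state as crtc->state.
 */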
static void dpu_crtc_reset(struct drm_crtc *crtc)
{
	struct dpu_crtc_state *cstate = kzalloc(sizeof(*cstate), GFP_KERNEL);

	if (crtc->state)
		dpu_crtc_destroy_state(crtc, crtc->state);

	/* guard against allocation failure instead of passing a bogus pointer */
	__drm_atomic_helper_crtc_reset(crtc, cstate ? &cstate->base : NULL);
}

/**
 * dpu_crtc_duplicate_state - state duplicate hook
 * @crtc: Pointer to drm crtc structure
 */
static struct drm_crtc_state *dpu_crtc_duplicate_state(struct drm_crtc *crtc)
{
	struct dpu_crtc_state *cstate, *old_cstate;

	if (!crtc || !crtc->state) {
		DPU_ERROR("invalid argument(s)\n");
		return NULL;
	}

	old_cstate = to_dpu_crtc_state(crtc->state);
	cstate = kmemdup(old_cstate, sizeof(*old_cstate), GFP_KERNEL);
	if (!cstate) {
		DPU_ERROR("failed to allocate state\n");
		return NULL;
	}

	/* duplicate base helper */
	__drm_atomic_helper_crtc_duplicate_state(crtc, &cstate->base);

	return &cstate->base;
}

static void dpu_crtc_disable(struct drm_crtc *crtc,
			     struct drm_crtc_state *old_crtc_state)
{
	struct dpu_crtc *dpu_crtc;
	struct dpu_crtc_state *cstate;
	struct drm_encoder *encoder;
	unsigned long flags;
	bool release_bandwidth = false;

	if (!crtc || !crtc->state) {
		DPU_ERROR("invalid crtc\n");
		return;
	}
	dpu_crtc = to_dpu_crtc(crtc);
	cstate = to_dpu_crtc_state(crtc->state);

	DRM_DEBUG_KMS("crtc%d\n", crtc->base.id);

	/* Disable/save vblank irq handling */
	drm_crtc_vblank_off(crtc);

	drm_for_each_encoder_mask(encoder, crtc->dev,
				  old_crtc_state->encoder_mask) {
		/* in video mode, we hold an extra bandwidth reference
		 * as we cannot drop bandwidth at frame-done if any
		 * crtc is being used in video mode.
		 */
		if (dpu_encoder_get_intf_mode(encoder) == INTF_MODE_VIDEO)
			release_bandwidth = true;
		dpu_encoder_assign_crtc(encoder, NULL);
	}

	/* wait for frame_event_done completion */
	if (_dpu_crtc_wait_for_frame_done(crtc))
		DPU_ERROR("crtc%d wait for frame done failed; frame_pending:%d\n",
				crtc->base.id,
				atomic_read(&dpu_crtc->frame_pending));

	trace_dpu_crtc_disable(DRMID(crtc), false, dpu_crtc);
	dpu_crtc->enabled = false;

	if (atomic_read(&dpu_crtc->frame_pending)) {
		trace_dpu_crtc_disable_frame_pending(DRMID(crtc),
				     atomic_read(&dpu_crtc->frame_pending));
		if (release_bandwidth)
			dpu_core_perf_crtc_release_bw(crtc);
		atomic_set(&dpu_crtc->frame_pending, 0);
	}

	dpu_core_perf_crtc_update(crtc, 0, true);

	drm_for_each_encoder_mask(encoder, crtc->dev, crtc->state->encoder_mask)
		dpu_encoder_register_frame_event_callback(encoder, NULL, NULL);

	memset(cstate->mixers, 0, sizeof(cstate->mixers));
	cstate->num_mixers = 0;

	/* disable clk & bw control until clk & bw properties are set */
	cstate->bw_control = false;
	cstate->bw_split_vote = false;

	if (crtc->state->event && !crtc->state->active) {
		spin_lock_irqsave(&crtc->dev->event_lock, flags);
		drm_crtc_send_vblank_event(crtc, crtc->state->event);
		crtc->state->event = NULL;
		spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
	}

	pm_runtime_put_sync(crtc->dev->dev);
}

static void dpu_crtc_enable(struct drm_crtc *crtc,
		struct drm_crtc_state *old_crtc_state)
{
	struct dpu_crtc *dpu_crtc;
	struct drm_encoder *encoder;
	bool request_bandwidth = false;

	if (!crtc) {
		DPU_ERROR("invalid crtc\n");
		return;
	}

	pm_runtime_get_sync(crtc->dev->dev);

	DRM_DEBUG_KMS("crtc%d\n", crtc->base.id);
	dpu_crtc = to_dpu_crtc(crtc);

	drm_for_each_encoder_mask(encoder, crtc->dev, crtc->state->encoder_mask) {
		/* in video mode, we hold an extra bandwidth reference
		 * as we cannot drop bandwidth at frame-done if any
		 * crtc is being used in video mode.
		 */
		if (dpu_encoder_get_intf_mode(encoder) == INTF_MODE_VIDEO)
			request_bandwidth = true;
		dpu_encoder_register_frame_event_callback(encoder,
				dpu_crtc_frame_event_cb, (void *)crtc);
	}

	if (request_bandwidth)
		atomic_inc(&_dpu_crtc_get_kms(crtc)->bandwidth_ref);

	trace_dpu_crtc_enable(DRMID(crtc), true, dpu_crtc);
	dpu_crtc->enabled = true;

	drm_for_each_encoder_mask(encoder, crtc->dev, crtc->state->encoder_mask)
		dpu_encoder_assign_crtc(encoder, crtc);

	/* Enable/restore vblank irq handling */
	drm_crtc_vblank_on(crtc);
}

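/* scratch bookkeeping used by dpu_crtc_atomic_check() to sort planes */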
struct plane_state {
	struct dpu_plane_state *dpu_pstate;
	const struct drm_plane_state *drm_pstate;
	int stage;
	u32 pipe_id;
};

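/*
 * Validate the proposed crtc state: stage every plane, pair multirect
 * users sharing a pipe, enforce the per-stage plane limits on the left
 * and right mixers, run the core performance check and verify source
 * split ordering/geometry.
 */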
static int dpu_crtc_atomic_check(struct drm_crtc *crtc,
		struct drm_crtc_state *state)
{
	struct dpu_crtc *dpu_crtc;
	struct plane_state *pstates;
	struct dpu_crtc_state *cstate;

	const struct drm_plane_state *pstate;
	struct drm_plane *plane;
	struct drm_display_mode *mode;

	int cnt = 0, rc = 0, mixer_width, i, z_pos;

	struct dpu_multirect_plane_states multirect_plane[DPU_STAGE_MAX * 2];
	int multirect_count = 0;
	const struct drm_plane_state *pipe_staged[SSPP_MAX];
	int left_zpos_cnt = 0, right_zpos_cnt = 0;
	struct drm_rect crtc_rect = { 0 };

	if (!crtc) {
		DPU_ERROR("invalid crtc\n");
		return -EINVAL;
	}

	pstates = kcalloc(DPU_STAGE_MAX * 4, sizeof(*pstates), GFP_KERNEL);
	if (!pstates)
		return -ENOMEM;

	dpu_crtc = to_dpu_crtc(crtc);
	cstate = to_dpu_crtc_state(state);

	if (!state->enable || !state->active) {
		DPU_DEBUG("crtc%d -> enable %d, active %d, skip atomic_check\n",
				crtc->base.id, state->enable, state->active);
		goto end;
	}

	mode = &state->adjusted_mode;
	DPU_DEBUG("%s: check\n", dpu_crtc->name);

	/* force a full mode set if active state changed */
	if (state->active_changed)
		state->mode_changed = true;

	memset(pipe_staged, 0, sizeof(pipe_staged));

	mixer_width = mode->hdisplay / cstate->num_mixers;

	_dpu_crtc_setup_lm_bounds(crtc, state);

	crtc_rect.x2 = mode->hdisplay;
	crtc_rect.y2 = mode->vdisplay;

	/* get plane state for all drm planes associated with crtc state */
	drm_atomic_crtc_state_for_each_plane_state(plane, pstate, state) {
		struct drm_rect dst, clip = crtc_rect;

		if (IS_ERR_OR_NULL(pstate)) {
			rc = PTR_ERR(pstate);
			DPU_ERROR("%s: failed to get plane%d state, %d\n",
					dpu_crtc->name, plane->base.id, rc);
			goto end;
		}
		if (cnt >= DPU_STAGE_MAX * 4)
			continue;

		pstates[cnt].dpu_pstate = to_dpu_plane_state(pstate);
		pstates[cnt].drm_pstate = pstate;
		pstates[cnt].stage = pstate->normalized_zpos;
		pstates[cnt].pipe_id = dpu_plane_pipe(plane);

		if (pipe_staged[pstates[cnt].pipe_id]) {
			multirect_plane[multirect_count].r0 =
				pipe_staged[pstates[cnt].pipe_id];
			multirect_plane[multirect_count].r1 = pstate;
			multirect_count++;

			pipe_staged[pstates[cnt].pipe_id] = NULL;
		} else {
			pipe_staged[pstates[cnt].pipe_id] = pstate;
		}

		cnt++;

		dst = drm_plane_state_dest(pstate);
		if (!drm_rect_intersect(&clip, &dst)) {
			DPU_ERROR("invalid vertical/horizontal destination\n");
			DPU_ERROR("display: " DRM_RECT_FMT " plane: "
				  DRM_RECT_FMT "\n", DRM_RECT_ARG(&crtc_rect),
				  DRM_RECT_ARG(&dst));
			rc = -E2BIG;
			goto end;
		}
	}

	for (i = 1; i < SSPP_MAX; i++) {
		if (pipe_staged[i]) {
			dpu_plane_clear_multirect(pipe_staged[i]);

			if (is_dpu_plane_virtual(pipe_staged[i]->plane)) {
				DPU_ERROR(
					"r1 only virt plane:%d not supported\n",
					pipe_staged[i]->plane->base.id);
				rc = -EINVAL;
				goto end;
			}
		}
	}

	z_pos = -1;
	for (i = 0; i < cnt; i++) {
		/* reset counts at every new blend stage */
		if (pstates[i].stage != z_pos) {
			left_zpos_cnt = 0;
			right_zpos_cnt = 0;
			z_pos = pstates[i].stage;
		}

		/* verify z_pos setting before using it */
		if (z_pos >= DPU_STAGE_MAX - DPU_STAGE_0) {
			DPU_ERROR("> %d plane stages assigned\n",
					DPU_STAGE_MAX - DPU_STAGE_0);
			rc = -EINVAL;
			goto end;
		} else if (pstates[i].drm_pstate->crtc_x < mixer_width) {
			if (left_zpos_cnt == 2) {
				DPU_ERROR("> 2 planes @ stage %d on left\n",
					z_pos);
				rc = -EINVAL;
				goto end;
			}
			left_zpos_cnt++;

		} else {
			if (right_zpos_cnt == 2) {
				DPU_ERROR("> 2 planes @ stage %d on right\n",
					z_pos);
				rc = -EINVAL;
				goto end;
			}
			right_zpos_cnt++;
		}

		pstates[i].dpu_pstate->stage = z_pos + DPU_STAGE_0;
		DPU_DEBUG("%s: zpos %d\n", dpu_crtc->name, z_pos);
	}

	for (i = 0; i < multirect_count; i++) {
		if (dpu_plane_validate_multirect_v2(&multirect_plane[i])) {
			DPU_ERROR(
			"multirect validation failed for planes (%d - %d)\n",
					multirect_plane[i].r0->plane->base.id,
					multirect_plane[i].r1->plane->base.id);
			rc = -EINVAL;
			goto end;
		}
	}

	atomic_inc(&_dpu_crtc_get_kms(crtc)->bandwidth_ref);

	rc = dpu_core_perf_crtc_check(crtc, state);
	if (rc) {
		DPU_ERROR("crtc%d failed performance check %d\n",
				crtc->base.id, rc);
		goto end;
	}

	/*
	 * Validate source split:
	 * use pstates sorted by stage to check planes on the same stage;
	 * we assume that all pipes are in source split, so it's valid to
	 * compare without taking left/right mixer placement into account.
	 */
	for (i = 1; i < cnt; i++) {
		struct plane_state *prv_pstate, *cur_pstate;
		struct drm_rect left_rect, right_rect;
		int32_t left_pid, right_pid;
		int32_t stage;

		prv_pstate = &pstates[i - 1];
		cur_pstate = &pstates[i];
		if (prv_pstate->stage != cur_pstate->stage)
			continue;

		stage = cur_pstate->stage;

		left_pid = prv_pstate->dpu_pstate->base.plane->base.id;
		left_rect = drm_plane_state_dest(prv_pstate->drm_pstate);

		right_pid = cur_pstate->dpu_pstate->base.plane->base.id;
		right_rect = drm_plane_state_dest(cur_pstate->drm_pstate);

		if (right_rect.x1 < left_rect.x1) {
			swap(left_pid, right_pid);
			swap(left_rect, right_rect);
		}

		/*
		 * - planes are enumerated in pipe-priority order such that
		 *   planes with lower drm_id must be left-most in a shared
		 *   blend-stage when using source split.
		 * - planes in source split must be contiguous in width
		 * - planes in source split must have same dest yoff and height
		 */
		if (right_pid < left_pid) {
			DPU_ERROR(
				"invalid src split cfg. priority mismatch. stage: %d left: %d right: %d\n",
				stage, left_pid, right_pid);
			rc = -EINVAL;
			goto end;
		} else if (right_rect.x1 != drm_rect_width(&left_rect)) {
			DPU_ERROR("non-contiguous coordinates for src split. "
				  "stage: %d left: " DRM_RECT_FMT " right: "
				  DRM_RECT_FMT "\n", stage,
				  DRM_RECT_ARG(&left_rect),
				  DRM_RECT_ARG(&right_rect));
			rc = -EINVAL;
			goto end;
		} else if (left_rect.y1 != right_rect.y1 ||
			   drm_rect_height(&left_rect) != drm_rect_height(&right_rect)) {
			DPU_ERROR("source split at stage: %d. invalid "
				  "yoff/height: left: " DRM_RECT_FMT " right: "
				  DRM_RECT_FMT "\n", stage,
				  DRM_RECT_ARG(&left_rect),
				  DRM_RECT_ARG(&right_rect));
			rc = -EINVAL;
			goto end;
		}
	}

end:
	kfree(pstates);
	return rc;
}

int dpu_crtc_vblank(struct drm_crtc *crtc, bool en)
{
	struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);
	struct drm_encoder *enc;

	trace_dpu_crtc_vblank(DRMID(&dpu_crtc->base), en, dpu_crtc);

	/*
	 * Normally we would iterate through encoder_mask in crtc state to find
	 * attached encoders. In this case, we might be disabling vblank _after_
	 * encoder_mask has been cleared.
	 *
	 * Instead, we "assign" a crtc to the encoder in enable and clear it in
	 * disable (which is also after encoder_mask is cleared). So instead of
	 * using encoder mask, we'll ask the encoder to toggle itself iff it's
	 * currently assigned to our crtc.
	 *
	 * Note also that this function cannot be called while crtc is disabled
	 * since we use drm_crtc_vblank_on/off. So we don't need to worry
	 * about the assigned crtcs being inconsistent with the current state
	 * (which means no need to worry about modeset locks).
	 */
	list_for_each_entry(enc, &crtc->dev->mode_config.encoder_list, head) {
		trace_dpu_crtc_vblank_enable(DRMID(crtc), DRMID(enc), en,
					     dpu_crtc);

		dpu_encoder_toggle_vblank_for_crtc(enc, crtc, en);
	}

	return 0;
}

#ifdef CONFIG_DEBUG_FS
static int _dpu_debugfs_status_show(struct seq_file *s, void *data)
{
	struct dpu_crtc *dpu_crtc;
	struct dpu_plane_state *pstate = NULL;
	struct dpu_crtc_mixer *m;

	struct drm_crtc *crtc;
	struct drm_plane *plane;
	struct drm_display_mode *mode;
	struct drm_framebuffer *fb;
	struct drm_plane_state *state;
	struct dpu_crtc_state *cstate;

	int i, out_width;

	dpu_crtc = s->private;
	crtc = &dpu_crtc->base;

	drm_modeset_lock_all(crtc->dev);
	cstate = to_dpu_crtc_state(crtc->state);

	mode = &crtc->state->adjusted_mode;
	out_width = mode->hdisplay / cstate->num_mixers;

	seq_printf(s, "crtc:%d width:%d height:%d\n", crtc->base.id,
				mode->hdisplay, mode->vdisplay);

	seq_puts(s, "\n");

	for (i = 0; i < cstate->num_mixers; ++i) {
		m = &cstate->mixers[i];
		seq_printf(s, "\tmixer:%d ctl:%d width:%d height:%d\n",
			m->hw_lm->idx - LM_0, m->lm_ctl->idx - CTL_0,
			out_width, mode->vdisplay);
	}

	seq_puts(s, "\n");

	drm_atomic_crtc_for_each_plane(plane, crtc) {
		pstate = to_dpu_plane_state(plane->state);
		state = plane->state;

		if (!pstate || !state)
			continue;

		seq_printf(s, "\tplane:%u stage:%d\n", plane->base.id,
			pstate->stage);

		if (plane->state->fb) {
			fb = plane->state->fb;

			seq_printf(s, "\tfb:%d image format:%4.4s wxh:%ux%u ",
				fb->base.id, (char *) &fb->format->format,
				fb->width, fb->height);
			for (i = 0; i < ARRAY_SIZE(fb->format->cpp); ++i)
				seq_printf(s, "cpp[%d]:%u ",
						i, fb->format->cpp[i]);
			seq_puts(s, "\n\t");

			seq_printf(s, "modifier:%8llu ", fb->modifier);
			seq_puts(s, "\n");

			seq_puts(s, "\t");
			for (i = 0; i < ARRAY_SIZE(fb->pitches); i++)
				seq_printf(s, "pitches[%d]:%8u ", i,
							fb->pitches[i]);
			seq_puts(s, "\n");

			seq_puts(s, "\t");
			for (i = 0; i < ARRAY_SIZE(fb->offsets); i++)
				seq_printf(s, "offsets[%d]:%8u ", i,
							fb->offsets[i]);
			seq_puts(s, "\n");
		}

		seq_printf(s, "\tsrc_x:%4d src_y:%4d src_w:%4d src_h:%4d\n",
			state->src_x, state->src_y, state->src_w, state->src_h);

		seq_printf(s, "\tdst x:%4d dst_y:%4d dst_w:%4d dst_h:%4d\n",
			state->crtc_x, state->crtc_y, state->crtc_w,
			state->crtc_h);
		seq_printf(s, "\tmultirect: mode: %d index: %d\n",
			pstate->multirect_mode, pstate->multirect_index);

		seq_puts(s, "\n");
	}
	if (dpu_crtc->vblank_cb_count) {
		ktime_t diff = ktime_sub(ktime_get(), dpu_crtc->vblank_cb_time);
		s64 diff_ms = ktime_to_ms(diff);
		s64 fps = diff_ms ? div_s64(
				dpu_crtc->vblank_cb_count * 1000, diff_ms) : 0;

		seq_printf(s,
			"vblank fps:%lld count:%u total:%llums total_framecount:%llu\n",
				fps, dpu_crtc->vblank_cb_count,
				ktime_to_ms(diff), dpu_crtc->play_count);

		/* reset time & count for next measurement */
		dpu_crtc->vblank_cb_count = 0;
		dpu_crtc->vblank_cb_time = ktime_set(0, 0);
	}

	drm_modeset_unlock_all(crtc->dev);

	return 0;
}

static int _dpu_debugfs_status_open(struct inode *inode, struct file *file)
{
	return single_open(file, _dpu_debugfs_status_show, inode->i_private);
}

#define DEFINE_DPU_DEBUGFS_SEQ_FOPS(__prefix)                          \
static int __prefix ## _open(struct inode *inode, struct file *file)	\
{									\
	return single_open(file, __prefix ## _show, inode->i_private);	\
}									\
static const struct file_operations __prefix ## _fops = {		\
	.owner = THIS_MODULE,						\
	.open = __prefix ## _open,					\
	.release = single_release,					\
	.read = seq_read,						\
	.llseek = seq_lseek,						\
}

static int dpu_crtc_debugfs_state_show(struct seq_file *s, void *v)
{
	struct drm_crtc *crtc = (struct drm_crtc *) s->private;
	struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);

	seq_printf(s, "client type: %d\n", dpu_crtc_get_client_type(crtc));
	seq_printf(s, "intf_mode: %d\n", dpu_crtc_get_intf_mode(crtc));
	seq_printf(s, "core_clk_rate: %llu\n",
			dpu_crtc->cur_perf.core_clk_rate);
	seq_printf(s, "bw_ctl: %llu\n", dpu_crtc->cur_perf.bw_ctl);
	seq_printf(s, "max_per_pipe_ib: %llu\n",
				dpu_crtc->cur_perf.max_per_pipe_ib);

	return 0;
}
DEFINE_DPU_DEBUGFS_SEQ_FOPS(dpu_crtc_debugfs_state);

static int _dpu_crtc_init_debugfs(struct drm_crtc *crtc)
{
	struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);

	static const struct file_operations debugfs_status_fops = {
		.open =		_dpu_debugfs_status_open,
		.read =		seq_read,
		.llseek =	seq_lseek,
		.release =	single_release,
	};

	dpu_crtc->debugfs_root = debugfs_create_dir(dpu_crtc->name,
			crtc->dev->primary->debugfs_root);

	debugfs_create_file("status", 0400,
			dpu_crtc->debugfs_root,
			dpu_crtc, &debugfs_status_fops);
	debugfs_create_file("state", 0600,
			dpu_crtc->debugfs_root,
			&dpu_crtc->base,
			&dpu_crtc_debugfs_state_fops);

	return 0;
}
#else
static int _dpu_crtc_init_debugfs(struct drm_crtc *crtc)
{
	return 0;
}
#endif /* CONFIG_DEBUG_FS */

static int dpu_crtc_late_register(struct drm_crtc *crtc)
{
	return _dpu_crtc_init_debugfs(crtc);
}

static void dpu_crtc_early_unregister(struct drm_crtc *crtc)
{
	struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);

	debugfs_remove_recursive(dpu_crtc->debugfs_root);
}

static const struct drm_crtc_funcs dpu_crtc_funcs = {
	.set_config = drm_atomic_helper_set_config,
	.destroy = dpu_crtc_destroy,
	.page_flip = drm_atomic_helper_page_flip,
	.reset = dpu_crtc_reset,
	.atomic_duplicate_state = dpu_crtc_duplicate_state,
	.atomic_destroy_state = dpu_crtc_destroy_state,
	.late_register = dpu_crtc_late_register,
	.early_unregister = dpu_crtc_early_unregister,
	.enable_vblank  = msm_crtc_enable_vblank,
	.disable_vblank = msm_crtc_disable_vblank,
};

static const struct drm_crtc_helper_funcs dpu_crtc_helper_funcs = {
	.atomic_disable = dpu_crtc_disable,
	.atomic_enable = dpu_crtc_enable,
	.atomic_check = dpu_crtc_atomic_check,
	.atomic_begin = dpu_crtc_atomic_begin,
	.atomic_flush = dpu_crtc_atomic_flush,
};

/**
 * dpu_crtc_init - initialize a new crtc with its primary and cursor planes
 * @dev: drm device
 * @plane: primary plane attached to the crtc
 * @cursor: cursor plane attached to the crtc, may be NULL
 */
struct drm_crtc *dpu_crtc_init(struct drm_device *dev, struct drm_plane *plane,
				struct drm_plane *cursor)
{
	struct drm_crtc *crtc = NULL;
	struct dpu_crtc *dpu_crtc = NULL;
	int i;

	dpu_crtc = kzalloc(sizeof(*dpu_crtc), GFP_KERNEL);
	if (!dpu_crtc)
		return ERR_PTR(-ENOMEM);

	crtc = &dpu_crtc->base;
	crtc->dev = dev;

	spin_lock_init(&dpu_crtc->spin_lock);
	atomic_set(&dpu_crtc->frame_pending, 0);

	init_completion(&dpu_crtc->frame_done_comp);

	INIT_LIST_HEAD(&dpu_crtc->frame_event_list);

	for (i = 0; i < ARRAY_SIZE(dpu_crtc->frame_events); i++) {
		INIT_LIST_HEAD(&dpu_crtc->frame_events[i].list);
		list_add(&dpu_crtc->frame_events[i].list,
				&dpu_crtc->frame_event_list);
		kthread_init_work(&dpu_crtc->frame_events[i].work,
				dpu_crtc_frame_event_work);
	}

	drm_crtc_init_with_planes(dev, crtc, plane, cursor, &dpu_crtc_funcs,
				NULL);

	drm_crtc_helper_add(crtc, &dpu_crtc_helper_funcs);

	drm_crtc_enable_color_mgmt(crtc, 0, true, 0);

	/* save user friendly CRTC name for later */
	snprintf(dpu_crtc->name, DPU_CRTC_NAME_SIZE, "crtc%u", crtc->base.id);

	/* initialize event handling */
	spin_lock_init(&dpu_crtc->event_lock);

	DPU_DEBUG("%s: successfully initialized crtc\n", dpu_crtc->name);
	return crtc;
}