  1// SPDX-License-Identifier: GPL-2.0-only
  2/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
  3 * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
  4 */
  5
  6#include <linux/delay.h>
  7
  8#include <drm/drm_managed.h>
  9
 10#include "dpu_hwio.h"
 11#include "dpu_hw_ctl.h"
 12#include "dpu_kms.h"
 13#include "dpu_trace.h"
 14
 15#define   CTL_LAYER(lm)                 \
 16	(((lm) == LM_5) ? (0x024) : (((lm) - LM_0) * 0x004))
 17#define   CTL_LAYER_EXT(lm)             \
 18	(0x40 + (((lm) - LM_0) * 0x004))
 19#define   CTL_LAYER_EXT2(lm)             \
 20	(0x70 + (((lm) - LM_0) * 0x004))
 21#define   CTL_LAYER_EXT3(lm)             \
 22	(0xA0 + (((lm) - LM_0) * 0x004))
 23#define CTL_LAYER_EXT4(lm)             \
 24	(0xB8 + (((lm) - LM_0) * 0x004))
 25#define   CTL_TOP                       0x014
 26#define   CTL_FLUSH                     0x018
 27#define   CTL_START                     0x01C
 28#define   CTL_PREPARE                   0x0d0
 29#define   CTL_SW_RESET                  0x030
 30#define   CTL_LAYER_EXTN_OFFSET         0x40
 31#define   CTL_MERGE_3D_ACTIVE           0x0E4
 32#define   CTL_DSC_ACTIVE                0x0E8
 33#define   CTL_WB_ACTIVE                 0x0EC
 34#define   CTL_INTF_ACTIVE               0x0F4
 35#define   CTL_CDM_ACTIVE                0x0F8
 36#define   CTL_FETCH_PIPE_ACTIVE         0x0FC
 37#define   CTL_MERGE_3D_FLUSH            0x100
 38#define   CTL_DSC_FLUSH                 0x104
 39#define   CTL_WB_FLUSH                  0x108
 40#define   CTL_INTF_FLUSH                0x110
 41#define   CTL_CDM_FLUSH                 0x114
 42#define   CTL_PERIPH_FLUSH              0x128
 43#define   CTL_INTF_MASTER               0x134
 44#define   CTL_DSPP_n_FLUSH(n)           ((0x13C) + ((n) * 4))
 45
 46#define CTL_MIXER_BORDER_OUT            BIT(24)
 47#define CTL_FLUSH_MASK_CTL              BIT(17)
 48
 49#define DPU_REG_RESET_TIMEOUT_US        2000
 50#define MERGE_3D_IDX                    23
 51#define DSC_IDX                         22
 52#define CDM_IDX                         26
 53#define PERIPH_IDX                      30
 54#define INTF_IDX                        31
 55#define WB_IDX                          16
 56#define DSPP_IDX                        29  /* From DPU hw rev 7.x.x */
 57#define CTL_INVALID_BIT                 0xffff
 58#define CTL_DEFAULT_GROUP_ID            0xf
 59
 60static const u32 fetch_tbl[SSPP_MAX] = {CTL_INVALID_BIT, 16, 17, 18, 19,
 61	CTL_INVALID_BIT, CTL_INVALID_BIT, CTL_INVALID_BIT, CTL_INVALID_BIT, 0,
 62	1, 2, 3, 4, 5};
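
/*
 * A minimal sketch of how fetch_tbl is consumed (illustrative, not part
 * of the kernel file): the table maps each dpu_sspp enum value to its
 * bit position in CTL_FETCH_PIPE_ACTIVE, with CTL_INVALID_BIT marking
 * pipes that have no fetch-active bit. Assuming SSPP_VIG0 is the first
 * valid index:
 *
 *	u32 bit = fetch_tbl[SSPP_VIG0];		// 16 in this table
 *
 *	if (bit != CTL_INVALID_BIT)
 *		val |= BIT(bit);		// mark VIG0 as actively fetching
 */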
 63
 64static int _mixer_stages(const struct dpu_lm_cfg *mixer, int count,
 65		enum dpu_lm lm)
 66{
 67	int i;
 68	int stages = -EINVAL;
 69
 70	for (i = 0; i < count; i++) {
 71		if (lm == mixer[i].id) {
 72			stages = mixer[i].sblk->maxblendstages;
 73			break;
 74		}
 75	}
 76
 77	return stages;
 78}
 79
 80static inline u32 dpu_hw_ctl_get_flush_register(struct dpu_hw_ctl *ctx)
 81{
 82	struct dpu_hw_blk_reg_map *c = &ctx->hw;
 83
 84	return DPU_REG_READ(c, CTL_FLUSH);
 85}
 86
 87static inline void dpu_hw_ctl_trigger_start(struct dpu_hw_ctl *ctx)
 88{
 89	trace_dpu_hw_ctl_trigger_start(ctx->pending_flush_mask,
 90				       dpu_hw_ctl_get_flush_register(ctx));
 91	DPU_REG_WRITE(&ctx->hw, CTL_START, 0x1);
 92}
 93
 94static inline bool dpu_hw_ctl_is_started(struct dpu_hw_ctl *ctx)
 95{
 96	return !!(DPU_REG_READ(&ctx->hw, CTL_START) & BIT(0));
 97}
 98
 99static inline void dpu_hw_ctl_trigger_pending(struct dpu_hw_ctl *ctx)
100{
101	trace_dpu_hw_ctl_trigger_prepare(ctx->pending_flush_mask,
102					 dpu_hw_ctl_get_flush_register(ctx));
103	DPU_REG_WRITE(&ctx->hw, CTL_PREPARE, 0x1);
104}
105
106static inline void dpu_hw_ctl_clear_pending_flush(struct dpu_hw_ctl *ctx)
107{
108	trace_dpu_hw_ctl_clear_pending_flush(ctx->pending_flush_mask,
109				     dpu_hw_ctl_get_flush_register(ctx));
110	ctx->pending_flush_mask = 0x0;
111	ctx->pending_intf_flush_mask = 0;
112	ctx->pending_wb_flush_mask = 0;
113	ctx->pending_merge_3d_flush_mask = 0;
114	ctx->pending_dsc_flush_mask = 0;
115	ctx->pending_cdm_flush_mask = 0;
116
117	memset(ctx->pending_dspp_flush_mask, 0,
118		sizeof(ctx->pending_dspp_flush_mask));
119}
120
121static inline void dpu_hw_ctl_update_pending_flush(struct dpu_hw_ctl *ctx,
122		u32 flushbits)
123{
124	trace_dpu_hw_ctl_update_pending_flush(flushbits,
125					      ctx->pending_flush_mask);
126	ctx->pending_flush_mask |= flushbits;
127}
128
129static u32 dpu_hw_ctl_get_pending_flush(struct dpu_hw_ctl *ctx)
130{
131	return ctx->pending_flush_mask;
132}
133
134static inline void dpu_hw_ctl_trigger_flush_v1(struct dpu_hw_ctl *ctx)
135{
136	int dspp;
137
138	if (ctx->pending_flush_mask & BIT(MERGE_3D_IDX))
139		DPU_REG_WRITE(&ctx->hw, CTL_MERGE_3D_FLUSH,
140				ctx->pending_merge_3d_flush_mask);
141	if (ctx->pending_flush_mask & BIT(INTF_IDX))
142		DPU_REG_WRITE(&ctx->hw, CTL_INTF_FLUSH,
143				ctx->pending_intf_flush_mask);
144	if (ctx->pending_flush_mask & BIT(WB_IDX))
145		DPU_REG_WRITE(&ctx->hw, CTL_WB_FLUSH,
146				ctx->pending_wb_flush_mask);
147
148	if (ctx->pending_flush_mask & BIT(DSPP_IDX))
149		for (dspp = DSPP_0; dspp < DSPP_MAX; dspp++) {
150			if (ctx->pending_dspp_flush_mask[dspp - DSPP_0])
151				DPU_REG_WRITE(&ctx->hw,
152				CTL_DSPP_n_FLUSH(dspp - DSPP_0),
153				ctx->pending_dspp_flush_mask[dspp - DSPP_0]);
154		}
155
156	if (ctx->pending_flush_mask & BIT(PERIPH_IDX))
157		DPU_REG_WRITE(&ctx->hw, CTL_PERIPH_FLUSH,
158			      ctx->pending_periph_flush_mask);
159
160	if (ctx->pending_flush_mask & BIT(DSC_IDX))
161		DPU_REG_WRITE(&ctx->hw, CTL_DSC_FLUSH,
162			      ctx->pending_dsc_flush_mask);
163
164	if (ctx->pending_flush_mask & BIT(CDM_IDX))
165		DPU_REG_WRITE(&ctx->hw, CTL_CDM_FLUSH,
166			      ctx->pending_cdm_flush_mask);
167
168	DPU_REG_WRITE(&ctx->hw, CTL_FLUSH, ctx->pending_flush_mask);
169}
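
/*
 * A worked example of the two-level "active CTL" flush implemented
 * above: flushing INTF_1 via dpu_hw_ctl_update_pending_flush_intf_v1()
 * (defined below) sets pending_intf_flush_mask |= BIT(1) and
 * pending_flush_mask |= BIT(INTF_IDX), i.e. BIT(31). The trigger then
 * first writes BIT(1) into the CTL_INTF_FLUSH sub-register and finally
 * commits the main mask, with bit 31 set, to CTL_FLUSH.
 */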
170
171static inline void dpu_hw_ctl_trigger_flush(struct dpu_hw_ctl *ctx)
172{
173	trace_dpu_hw_ctl_trigger_pending_flush(ctx->pending_flush_mask,
174				     dpu_hw_ctl_get_flush_register(ctx));
175	DPU_REG_WRITE(&ctx->hw, CTL_FLUSH, ctx->pending_flush_mask);
176}
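
/*
 * A hedged caller-side sketch of the accumulate-then-commit flow (the
 * real callers live in the encoder/CRTC code; the sequence below is
 * only illustrative):
 *
 *	ctl->ops.update_pending_flush_mixer(ctl, LM_0);	// BIT(6) | CTL_FLUSH_MASK_CTL
 *	ctl->ops.trigger_flush(ctl);			// write CTL_FLUSH
 *	ctl->ops.trigger_start(ctl);			// write CTL_START
 */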
177
178static void dpu_hw_ctl_update_pending_flush_sspp(struct dpu_hw_ctl *ctx,
179	enum dpu_sspp sspp)
180{
181	switch (sspp) {
182	case SSPP_VIG0:
183		ctx->pending_flush_mask |=  BIT(0);
184		break;
185	case SSPP_VIG1:
186		ctx->pending_flush_mask |= BIT(1);
187		break;
188	case SSPP_VIG2:
189		ctx->pending_flush_mask |= BIT(2);
190		break;
191	case SSPP_VIG3:
192		ctx->pending_flush_mask |= BIT(18);
193		break;
194	case SSPP_RGB0:
195		ctx->pending_flush_mask |= BIT(3);
196		break;
197	case SSPP_RGB1:
198		ctx->pending_flush_mask |= BIT(4);
199		break;
200	case SSPP_RGB2:
201		ctx->pending_flush_mask |= BIT(5);
202		break;
203	case SSPP_RGB3:
204		ctx->pending_flush_mask |= BIT(19);
205		break;
206	case SSPP_DMA0:
207		ctx->pending_flush_mask |= BIT(11);
208		break;
209	case SSPP_DMA1:
210		ctx->pending_flush_mask |= BIT(12);
211		break;
212	case SSPP_DMA2:
213		ctx->pending_flush_mask |= BIT(24);
214		break;
215	case SSPP_DMA3:
216		ctx->pending_flush_mask |= BIT(25);
217		break;
218	case SSPP_DMA4:
219		ctx->pending_flush_mask |= BIT(13);
220		break;
221	case SSPP_DMA5:
222		ctx->pending_flush_mask |= BIT(14);
223		break;
224	case SSPP_CURSOR0:
225		ctx->pending_flush_mask |= BIT(22);
226		break;
227	case SSPP_CURSOR1:
228		ctx->pending_flush_mask |= BIT(23);
229		break;
230	default:
231		break;
232	}
233}
234
235static void dpu_hw_ctl_update_pending_flush_mixer(struct dpu_hw_ctl *ctx,
236	enum dpu_lm lm)
237{
238	switch (lm) {
239	case LM_0:
240		ctx->pending_flush_mask |= BIT(6);
241		break;
242	case LM_1:
243		ctx->pending_flush_mask |= BIT(7);
244		break;
245	case LM_2:
246		ctx->pending_flush_mask |= BIT(8);
247		break;
248	case LM_3:
249		ctx->pending_flush_mask |= BIT(9);
250		break;
251	case LM_4:
252		ctx->pending_flush_mask |= BIT(10);
253		break;
254	case LM_5:
255		ctx->pending_flush_mask |= BIT(20);
256		break;
257	default:
258		break;
259	}
260
261	ctx->pending_flush_mask |= CTL_FLUSH_MASK_CTL;
262}
263
264static void dpu_hw_ctl_update_pending_flush_intf(struct dpu_hw_ctl *ctx,
265		enum dpu_intf intf)
266{
267	switch (intf) {
268	case INTF_0:
269		ctx->pending_flush_mask |= BIT(31);
270		break;
271	case INTF_1:
272		ctx->pending_flush_mask |= BIT(30);
273		break;
274	case INTF_2:
275		ctx->pending_flush_mask |= BIT(29);
276		break;
277	case INTF_3:
278		ctx->pending_flush_mask |= BIT(28);
279		break;
280	default:
281		break;
282	}
283}
284
285static void dpu_hw_ctl_update_pending_flush_wb(struct dpu_hw_ctl *ctx,
286		enum dpu_wb wb)
287{
288	switch (wb) {
289	case WB_0:
290	case WB_1:
291	case WB_2:
292		ctx->pending_flush_mask |= BIT(WB_IDX);
293		break;
294	default:
295		break;
296	}
297}
298
299static void dpu_hw_ctl_update_pending_flush_cdm(struct dpu_hw_ctl *ctx, enum dpu_cdm cdm_num)
300{
301	/* update pending flush only if CDM_0 is flushed */
302	if (cdm_num == CDM_0)
303		ctx->pending_flush_mask |= BIT(CDM_IDX);
304}
305
306static void dpu_hw_ctl_update_pending_flush_wb_v1(struct dpu_hw_ctl *ctx,
307		enum dpu_wb wb)
308{
309	ctx->pending_wb_flush_mask |= BIT(wb - WB_0);
310	ctx->pending_flush_mask |= BIT(WB_IDX);
311}
312
313static void dpu_hw_ctl_update_pending_flush_intf_v1(struct dpu_hw_ctl *ctx,
314		enum dpu_intf intf)
315{
316	ctx->pending_intf_flush_mask |= BIT(intf - INTF_0);
317	ctx->pending_flush_mask |= BIT(INTF_IDX);
318}
319
320static void dpu_hw_ctl_update_pending_flush_periph_v1(struct dpu_hw_ctl *ctx,
321						      enum dpu_intf intf)
322{
323	ctx->pending_periph_flush_mask |= BIT(intf - INTF_0);
324	ctx->pending_flush_mask |= BIT(PERIPH_IDX);
325}
326
327static void dpu_hw_ctl_update_pending_flush_merge_3d_v1(struct dpu_hw_ctl *ctx,
328		enum dpu_merge_3d merge_3d)
329{
330	ctx->pending_merge_3d_flush_mask |= BIT(merge_3d - MERGE_3D_0);
331	ctx->pending_flush_mask |= BIT(MERGE_3D_IDX);
332}
333
334static void dpu_hw_ctl_update_pending_flush_dsc_v1(struct dpu_hw_ctl *ctx,
335						   enum dpu_dsc dsc_num)
336{
337	ctx->pending_dsc_flush_mask |= BIT(dsc_num - DSC_0);
338	ctx->pending_flush_mask |= BIT(DSC_IDX);
339}
340
341static void dpu_hw_ctl_update_pending_flush_cdm_v1(struct dpu_hw_ctl *ctx, enum dpu_cdm cdm_num)
342{
343	ctx->pending_cdm_flush_mask |= BIT(cdm_num - CDM_0);
344	ctx->pending_flush_mask |= BIT(CDM_IDX);
345}
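
/*
 * All of the *_v1 helpers above follow the same pattern: record the
 * block instance in its dedicated sub-register mask, indexed from the
 * block's base enum, and set that block group's summary bit in the main
 * flush mask. For example, for DSC_1:
 *
 *	pending_dsc_flush_mask |= BIT(DSC_1 - DSC_0);	// sub-register bit 1
 *	pending_flush_mask |= BIT(DSC_IDX);		// bit 22 of CTL_FLUSH
 */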
346
347static void dpu_hw_ctl_update_pending_flush_dspp(struct dpu_hw_ctl *ctx,
348	enum dpu_dspp dspp, u32 dspp_sub_blk)
349{
350	switch (dspp) {
351	case DSPP_0:
352		ctx->pending_flush_mask |= BIT(13);
353		break;
354	case DSPP_1:
355		ctx->pending_flush_mask |= BIT(14);
356		break;
357	case DSPP_2:
358		ctx->pending_flush_mask |= BIT(15);
359		break;
360	case DSPP_3:
361		ctx->pending_flush_mask |= BIT(21);
362		break;
363	default:
364		break;
365	}
366}
367
368static void dpu_hw_ctl_update_pending_flush_dspp_sub_blocks(
369	struct dpu_hw_ctl *ctx,	enum dpu_dspp dspp, u32 dspp_sub_blk)
370{
371	if (dspp >= DSPP_MAX)
372		return;
373
374	switch (dspp_sub_blk) {
375	case DPU_DSPP_PCC:
376		ctx->pending_dspp_flush_mask[dspp - DSPP_0] |= BIT(4);
377		break;
378	default:
379		return;
380	}
381
382	ctx->pending_flush_mask |= BIT(DSPP_IDX);
383}
384
385static u32 dpu_hw_ctl_poll_reset_status(struct dpu_hw_ctl *ctx, u32 timeout_us)
386{
387	struct dpu_hw_blk_reg_map *c = &ctx->hw;
388	ktime_t timeout;
389	u32 status;
390
391	timeout = ktime_add_us(ktime_get(), timeout_us);
392
393	/*
394	 * It takes around 30us for the MDP to finish resetting its CTL path;
395	 * poll in 20-50us steps so the reset should complete by the first poll.
396	 */
397	do {
398		status = DPU_REG_READ(c, CTL_SW_RESET);
399		status &= 0x1;
400		if (status)
401			usleep_range(20, 50);
402	} while (status && ktime_compare_safe(ktime_get(), timeout) < 0);
403
404	return status;
405}
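
/*
 * Return convention, restating the loop above: 0 once the hardware has
 * cleared CTL_SW_RESET bit 0, non-zero if the bit is still set when
 * timeout_us expires; dpu_hw_ctl_reset_control() below maps a non-zero
 * result to -EINVAL.
 */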
406
407static int dpu_hw_ctl_reset_control(struct dpu_hw_ctl *ctx)
408{
409	struct dpu_hw_blk_reg_map *c = &ctx->hw;
410
411	pr_debug("issuing hw ctl reset for ctl:%d\n", ctx->idx);
412	DPU_REG_WRITE(c, CTL_SW_RESET, 0x1);
413	if (dpu_hw_ctl_poll_reset_status(ctx, DPU_REG_RESET_TIMEOUT_US))
414		return -EINVAL;
415
416	return 0;
417}
418
419static int dpu_hw_ctl_wait_reset_status(struct dpu_hw_ctl *ctx)
420{
421	struct dpu_hw_blk_reg_map *c = &ctx->hw;
422	u32 status;
423
424	status = DPU_REG_READ(c, CTL_SW_RESET);
425	status &= 0x01;
426	if (!status)
427		return 0;
428
429	pr_debug("hw ctl reset is set for ctl:%d\n", ctx->idx);
430	if (dpu_hw_ctl_poll_reset_status(ctx, DPU_REG_RESET_TIMEOUT_US)) {
431		pr_err("hw recovery is not complete for ctl:%d\n", ctx->idx);
432		return -EINVAL;
433	}
434
435	return 0;
436}
437
438static void dpu_hw_ctl_clear_all_blendstages(struct dpu_hw_ctl *ctx)
439{
440	struct dpu_hw_blk_reg_map *c = &ctx->hw;
441	int i;
442
443	for (i = 0; i < ctx->mixer_count; i++) {
444		enum dpu_lm mixer_id = ctx->mixer_hw_caps[i].id;
445
446		DPU_REG_WRITE(c, CTL_LAYER(mixer_id), 0);
447		DPU_REG_WRITE(c, CTL_LAYER_EXT(mixer_id), 0);
448		DPU_REG_WRITE(c, CTL_LAYER_EXT2(mixer_id), 0);
449		DPU_REG_WRITE(c, CTL_LAYER_EXT3(mixer_id), 0);
450	}
451
452	DPU_REG_WRITE(c, CTL_FETCH_PIPE_ACTIVE, 0);
453}
454
455struct ctl_blend_config {
456	int idx, shift, ext_shift;
457};
458
459static const struct ctl_blend_config ctl_blend_config[][2] = {
460	[SSPP_NONE] = { { -1 }, { -1 } },
461	[SSPP_MAX] =  { { -1 }, { -1 } },
462	[SSPP_VIG0] = { { 0, 0,  0  }, { 3, 0 } },
463	[SSPP_VIG1] = { { 0, 3,  2  }, { 3, 4 } },
464	[SSPP_VIG2] = { { 0, 6,  4  }, { 3, 8 } },
465	[SSPP_VIG3] = { { 0, 26, 6  }, { 3, 12 } },
466	[SSPP_RGB0] = { { 0, 9,  8  }, { -1 } },
467	[SSPP_RGB1] = { { 0, 12, 10 }, { -1 } },
468	[SSPP_RGB2] = { { 0, 15, 12 }, { -1 } },
469	[SSPP_RGB3] = { { 0, 29, 14 }, { -1 } },
470	[SSPP_DMA0] = { { 0, 18, 16 }, { 2, 8 } },
471	[SSPP_DMA1] = { { 0, 21, 18 }, { 2, 12 } },
472	[SSPP_DMA2] = { { 2, 0      }, { 2, 16 } },
473	[SSPP_DMA3] = { { 2, 4      }, { 2, 20 } },
474	[SSPP_DMA4] = { { 4, 0      }, { 4, 8 } },
475	[SSPP_DMA5] = { { 4, 4      }, { 4, 12 } },
476	[SSPP_CURSOR0] =  { { 1, 20 }, { -1 } },
477	[SSPP_CURSOR1] =  { { 1, 26 }, { -1 } },
478};
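
/*
 * How to read ctl_blend_config (a summary derived from the table above):
 * each SSPP has two entries, [0] for RECT_0/solo use and [1] for RECT_1.
 * "idx" selects the mixercfg word written in dpu_hw_ctl_setup_blendstage()
 * (0 = CTL_LAYER, whose 3-bit stage field overflows into a bit at
 * "ext_shift" in CTL_LAYER_EXT; 1..4 = the EXT registers with 4-bit
 * fields), "shift" is the field position, and idx == -1 means the pipe
 * has no encoding for that rectangle. E.g. SSPP_DMA0 on RECT_1 lands in
 * CTL_LAYER_EXT2 (idx 2) at bit 8.
 */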
479
480static void dpu_hw_ctl_setup_blendstage(struct dpu_hw_ctl *ctx,
481	enum dpu_lm lm, struct dpu_hw_stage_cfg *stage_cfg)
482{
483	struct dpu_hw_blk_reg_map *c = &ctx->hw;
484	u32 mix, ext, mix_ext;
485	u32 mixercfg[5] = { 0 };
486	int i, j;
487	int stages;
488	int pipes_per_stage;
489
490	stages = _mixer_stages(ctx->mixer_hw_caps, ctx->mixer_count, lm);
491	if (stages < 0)
492		return;
493
494	if (test_bit(DPU_MIXER_SOURCESPLIT,
495		&ctx->mixer_hw_caps->features))
496		pipes_per_stage = PIPES_PER_STAGE;
497	else
498		pipes_per_stage = 1;
499
500	mixercfg[0] = CTL_MIXER_BORDER_OUT; /* always set BORDER_OUT */
501
502	if (!stage_cfg)
503		goto exit;
504
505	for (i = 0; i <= stages; i++) {
506		/* overflow to ext register if 'i + 1 > 7' */
507		mix = (i + 1) & 0x7;
508		ext = i >= 7;
509		mix_ext = (i + 1) & 0xf;
510
511		for (j = 0 ; j < pipes_per_stage; j++) {
512			enum dpu_sspp_multirect_index rect_index =
513				stage_cfg->multirect_index[i][j];
514			enum dpu_sspp pipe = stage_cfg->stage[i][j];
515			const struct ctl_blend_config *cfg =
516				&ctl_blend_config[pipe][rect_index == DPU_SSPP_RECT_1];
517
518			/*
519			 * CTL_LAYER has a 3-bit field (plus an overflow bit in the EXT
520			 * register); all EXT registers have 4-bit fields.
521			 */
522			if (cfg->idx == -1) {
523				continue;
524			} else if (cfg->idx == 0) {
525				mixercfg[0] |= mix << cfg->shift;
526				mixercfg[1] |= ext << cfg->ext_shift;
527			} else {
528				mixercfg[cfg->idx] |= mix_ext << cfg->shift;
529			}
530		}
531	}
532
533exit:
534	DPU_REG_WRITE(c, CTL_LAYER(lm), mixercfg[0]);
535	DPU_REG_WRITE(c, CTL_LAYER_EXT(lm), mixercfg[1]);
536	DPU_REG_WRITE(c, CTL_LAYER_EXT2(lm), mixercfg[2]);
537	DPU_REG_WRITE(c, CTL_LAYER_EXT3(lm), mixercfg[3]);
538	if ((test_bit(DPU_CTL_HAS_LAYER_EXT4, &ctx->caps->features)))
539		DPU_REG_WRITE(c, CTL_LAYER_EXT4(lm), mixercfg[4]);
540}
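
/*
 * A worked example of the stage encoding above: staging a pipe at blend
 * stage i = 7 yields mix = (7 + 1) & 0x7 = 0 and ext = 1, so the 3-bit
 * CTL_LAYER field reads zero and the overflow bit in CTL_LAYER_EXT
 * selects the extended encoding, while the 4-bit EXT registers take
 * mix_ext = (7 + 1) & 0xf = 8 directly.
 */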
541
542
543static void dpu_hw_ctl_intf_cfg_v1(struct dpu_hw_ctl *ctx,
544		struct dpu_hw_intf_cfg *cfg)
545{
546	struct dpu_hw_blk_reg_map *c = &ctx->hw;
547	u32 intf_active = 0;
548	u32 dsc_active = 0;
549	u32 wb_active = 0;
550	u32 mode_sel = 0;
551
552	/* CTL_TOP[31:28] carries a group_id used to collate CTL paths
553	 * per VM. Explicitly disable it until VM support is added in
554	 * SW, since the power-on reset value does not disable it.
555	 */
556	if ((test_bit(DPU_CTL_VM_CFG, &ctx->caps->features)))
557		mode_sel = CTL_DEFAULT_GROUP_ID  << 28;
558
559	if (cfg->intf_mode_sel == DPU_CTL_MODE_SEL_CMD)
560		mode_sel |= BIT(17);
561
562	intf_active = DPU_REG_READ(c, CTL_INTF_ACTIVE);
563	wb_active = DPU_REG_READ(c, CTL_WB_ACTIVE);
564	dsc_active = DPU_REG_READ(c, CTL_DSC_ACTIVE);
565
566	if (cfg->intf)
567		intf_active |= BIT(cfg->intf - INTF_0);
568
569	if (cfg->wb)
570		wb_active |= BIT(cfg->wb - WB_0);
571
572	if (cfg->dsc)
573		dsc_active |= cfg->dsc;
574
575	DPU_REG_WRITE(c, CTL_TOP, mode_sel);
576	DPU_REG_WRITE(c, CTL_INTF_ACTIVE, intf_active);
577	DPU_REG_WRITE(c, CTL_WB_ACTIVE, wb_active);
578	DPU_REG_WRITE(c, CTL_DSC_ACTIVE, dsc_active);
579
580	if (cfg->merge_3d)
581		DPU_REG_WRITE(c, CTL_MERGE_3D_ACTIVE,
582			      BIT(cfg->merge_3d - MERGE_3D_0));
583
584	if (cfg->cdm)
585		DPU_REG_WRITE(c, CTL_CDM_ACTIVE, cfg->cdm);
586}
587
588static void dpu_hw_ctl_intf_cfg(struct dpu_hw_ctl *ctx,
589		struct dpu_hw_intf_cfg *cfg)
590{
591	struct dpu_hw_blk_reg_map *c = &ctx->hw;
592	u32 intf_cfg = 0;
593
594	intf_cfg |= (cfg->intf & 0xF) << 4;
595
596	if (cfg->mode_3d) {
597		intf_cfg |= BIT(19);
598		intf_cfg |= (cfg->mode_3d - 0x1) << 20;
599	}
600
601	if (cfg->wb)
602		intf_cfg |= (cfg->wb & 0x3) + 2;
603
604	switch (cfg->intf_mode_sel) {
605	case DPU_CTL_MODE_SEL_VID:
606		intf_cfg &= ~BIT(17);
607		intf_cfg &= ~(0x3 << 15);
608		break;
609	case DPU_CTL_MODE_SEL_CMD:
610		intf_cfg |= BIT(17);
611		intf_cfg |= ((cfg->stream_sel & 0x3) << 15);
612		break;
613	default:
614		pr_err("unknown interface type %d\n", cfg->intf_mode_sel);
615		return;
616	}
617
618	DPU_REG_WRITE(c, CTL_TOP, intf_cfg);
619}
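
/*
 * CTL_TOP bit layout as programmed by the legacy path above (a summary
 * inferred from the code): interface id in bits [7:4], writeback select
 * in the low bits as (wb & 0x3) + 2, 3D mux enable at bit 19 with the
 * mode in bits [21:20], command-mode select at bit 17 and stream_sel in
 * bits [16:15]. E.g. with intf == 1 in video mode, intf_cfg ends up as
 * 1 << 4 = 0x10.
 */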
620
621static void dpu_hw_ctl_reset_intf_cfg_v1(struct dpu_hw_ctl *ctx,
622		struct dpu_hw_intf_cfg *cfg)
623{
624	struct dpu_hw_blk_reg_map *c = &ctx->hw;
625	u32 intf_active = 0;
626	u32 wb_active = 0;
627	u32 merge3d_active = 0;
628	u32 dsc_active;
629	u32 cdm_active;
630
631	/*
632	 * This API resets each portion of the CTL path, namely
633	 * clearing the sspps staged on the lm, the merge_3d block,
634	 * interfaces, writeback, etc., to ensure a clean teardown of the
635	 * pipeline. It is used for writeback to begin with, to get a
636	 * proper teardown of the writeback session, but upon further
637	 * validation it can be extended to all interfaces.
638	 */
639	if (cfg->merge_3d) {
640		merge3d_active = DPU_REG_READ(c, CTL_MERGE_3D_ACTIVE);
641		merge3d_active &= ~BIT(cfg->merge_3d - MERGE_3D_0);
642		DPU_REG_WRITE(c, CTL_MERGE_3D_ACTIVE,
643				merge3d_active);
644	}
645
646	dpu_hw_ctl_clear_all_blendstages(ctx);
647
648	if (cfg->intf) {
649		intf_active = DPU_REG_READ(c, CTL_INTF_ACTIVE);
650		intf_active &= ~BIT(cfg->intf - INTF_0);
651		DPU_REG_WRITE(c, CTL_INTF_ACTIVE, intf_active);
652	}
653
654	if (cfg->wb) {
655		wb_active = DPU_REG_READ(c, CTL_WB_ACTIVE);
656		wb_active &= ~BIT(cfg->wb - WB_0);
657		DPU_REG_WRITE(c, CTL_WB_ACTIVE, wb_active);
658	}
659
660	if (cfg->dsc) {
661		dsc_active = DPU_REG_READ(c, CTL_DSC_ACTIVE);
662		dsc_active &= ~cfg->dsc;
663		DPU_REG_WRITE(c, CTL_DSC_ACTIVE, dsc_active);
664	}
665
666	if (cfg->cdm) {
667		cdm_active = DPU_REG_READ(c, CTL_CDM_ACTIVE);
668		cdm_active &= ~cfg->cdm;
669		DPU_REG_WRITE(c, CTL_CDM_ACTIVE, cdm_active);
670	}
671}
672
673static void dpu_hw_ctl_set_fetch_pipe_active(struct dpu_hw_ctl *ctx,
674	unsigned long *fetch_active)
675{
676	int i;
677	u32 val = 0;
678
679	if (fetch_active) {
680		for (i = 0; i < SSPP_MAX; i++) {
681			if (test_bit(i, fetch_active) &&
682				fetch_tbl[i] != CTL_INVALID_BIT)
683				val |= BIT(fetch_tbl[i]);
684		}
685	}
686
687	DPU_REG_WRITE(&ctx->hw, CTL_FETCH_PIPE_ACTIVE, val);
688}
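
/*
 * A hedged caller-side sketch (illustrative only) for hardware with the
 * DPU_CTL_FETCH_ACTIVE capability: the caller passes a bitmap of SSPP
 * ids whose fetch path should stay enabled:
 *
 *	DECLARE_BITMAP(active, SSPP_MAX);
 *
 *	bitmap_zero(active, SSPP_MAX);
 *	set_bit(SSPP_VIG0, active);		// keep VIG0 fetching
 *	ctl->ops.set_active_pipes(ctl, active);	// -> CTL_FETCH_PIPE_ACTIVE
 */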
689
690static void _setup_ctl_ops(struct dpu_hw_ctl_ops *ops,
691		unsigned long cap)
692{
693	if (cap & BIT(DPU_CTL_ACTIVE_CFG)) {
694		ops->trigger_flush = dpu_hw_ctl_trigger_flush_v1;
695		ops->setup_intf_cfg = dpu_hw_ctl_intf_cfg_v1;
696		ops->reset_intf_cfg = dpu_hw_ctl_reset_intf_cfg_v1;
697		ops->update_pending_flush_intf =
698			dpu_hw_ctl_update_pending_flush_intf_v1;
699
700		ops->update_pending_flush_periph =
701			dpu_hw_ctl_update_pending_flush_periph_v1;
702
703		ops->update_pending_flush_merge_3d =
704			dpu_hw_ctl_update_pending_flush_merge_3d_v1;
705		ops->update_pending_flush_wb = dpu_hw_ctl_update_pending_flush_wb_v1;
706		ops->update_pending_flush_dsc =
707			dpu_hw_ctl_update_pending_flush_dsc_v1;
708		ops->update_pending_flush_cdm = dpu_hw_ctl_update_pending_flush_cdm_v1;
709	} else {
710		ops->trigger_flush = dpu_hw_ctl_trigger_flush;
711		ops->setup_intf_cfg = dpu_hw_ctl_intf_cfg;
712		ops->update_pending_flush_intf =
713			dpu_hw_ctl_update_pending_flush_intf;
714		ops->update_pending_flush_wb = dpu_hw_ctl_update_pending_flush_wb;
715		ops->update_pending_flush_cdm = dpu_hw_ctl_update_pending_flush_cdm;
716	}
717	ops->clear_pending_flush = dpu_hw_ctl_clear_pending_flush;
718	ops->update_pending_flush = dpu_hw_ctl_update_pending_flush;
719	ops->get_pending_flush = dpu_hw_ctl_get_pending_flush;
720	ops->get_flush_register = dpu_hw_ctl_get_flush_register;
721	ops->trigger_start = dpu_hw_ctl_trigger_start;
722	ops->is_started = dpu_hw_ctl_is_started;
723	ops->trigger_pending = dpu_hw_ctl_trigger_pending;
724	ops->reset = dpu_hw_ctl_reset_control;
725	ops->wait_reset_status = dpu_hw_ctl_wait_reset_status;
726	ops->clear_all_blendstages = dpu_hw_ctl_clear_all_blendstages;
727	ops->setup_blendstage = dpu_hw_ctl_setup_blendstage;
728	ops->update_pending_flush_sspp = dpu_hw_ctl_update_pending_flush_sspp;
729	ops->update_pending_flush_mixer = dpu_hw_ctl_update_pending_flush_mixer;
730	if (cap & BIT(DPU_CTL_DSPP_SUB_BLOCK_FLUSH))
731		ops->update_pending_flush_dspp = dpu_hw_ctl_update_pending_flush_dspp_sub_blocks;
732	else
733		ops->update_pending_flush_dspp = dpu_hw_ctl_update_pending_flush_dspp;
734
735	if (cap & BIT(DPU_CTL_FETCH_ACTIVE))
736		ops->set_active_pipes = dpu_hw_ctl_set_fetch_pipe_active;
737};
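
/*
 * Note on the vtable selection above: DPU_CTL_ACTIVE_CFG picks the v1
 * ("active CTL") entry points that route flushes through the per-block
 * sub-registers, while older hardware keeps everything in the single
 * CTL_FLUSH word; the DSPP and fetch-active ops are gated separately on
 * their own capability bits.
 */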
738
739struct dpu_hw_ctl *dpu_hw_ctl_init(struct drm_device *dev,
740				   const struct dpu_ctl_cfg *cfg,
741				   void __iomem *addr,
742				   u32 mixer_count,
743				   const struct dpu_lm_cfg *mixer)
744{
745	struct dpu_hw_ctl *c;
746
747	c = drmm_kzalloc(dev, sizeof(*c), GFP_KERNEL);
748	if (!c)
749		return ERR_PTR(-ENOMEM);
750
751	c->hw.blk_addr = addr + cfg->base;
752	c->hw.log_mask = DPU_DBG_MASK_CTL;
753
754	c->caps = cfg;
755	_setup_ctl_ops(&c->ops, c->caps->features);
756	c->idx = cfg->id;
757	c->mixer_count = mixer_count;
758	c->mixer_hw_caps = mixer;
759
760	return c;
761}
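
/*
 * A hedged usage sketch of the constructor (variable names here are
 * illustrative): the allocation is DRM-managed via drmm_kzalloc(), so
 * there is no matching destroy call; the object is freed when the
 * drm_device is released.
 *
 *	struct dpu_hw_ctl *ctl;
 *
 *	ctl = dpu_hw_ctl_init(dev, ctl_cfg, mmio, mixer_count, mixer_caps);
 *	if (IS_ERR(ctl))
 *		return PTR_ERR(ctl);
 */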