// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
 */

#define pr_fmt(fmt)	"[drm:%s:%d] " fmt, __func__, __LINE__

#include <linux/debugfs.h>
#include <linux/delay.h>

#include "dpu_vbif.h"
#include "dpu_hw_vbif.h"
#include "dpu_trace.h"

static struct dpu_hw_vbif *dpu_get_vbif(struct dpu_kms *dpu_kms, enum dpu_vbif vbif_idx)
{
	if (vbif_idx < ARRAY_SIZE(dpu_kms->hw_vbif))
		return dpu_kms->hw_vbif[vbif_idx];

	return NULL;
}

static const char *dpu_vbif_name(enum dpu_vbif idx)
{
	switch (idx) {
	case VBIF_RT:
		return "VBIF_RT";
	case VBIF_NRT:
		return "VBIF_NRT";
	default:
		return "??";
	}
}

/**
 * _dpu_vbif_wait_for_xin_halt - wait for the xin to halt
 * @vbif:	Pointer to hardware vbif driver
 * @xin_id:	Client interface identifier
 * @return:	0 if success; error code otherwise
 */
static int _dpu_vbif_wait_for_xin_halt(struct dpu_hw_vbif *vbif, u32 xin_id)
{
	ktime_t timeout;
	bool status;
	int rc;

	if (!vbif || !vbif->cap || !vbif->ops.get_halt_ctrl) {
		DPU_ERROR("invalid arguments vbif %d\n", vbif != NULL);
		return -EINVAL;
	}

	timeout = ktime_add_us(ktime_get(), vbif->cap->xin_halt_timeout);
	for (;;) {
		status = vbif->ops.get_halt_ctrl(vbif, xin_id);
		if (status)
			break;
		if (ktime_compare_safe(ktime_get(), timeout) > 0) {
			status = vbif->ops.get_halt_ctrl(vbif, xin_id);
			break;
		}
		usleep_range(501, 1000);
	}

	if (!status) {
		rc = -ETIMEDOUT;
		DPU_ERROR("%s client %d not halting. TIMEDOUT.\n",
				dpu_vbif_name(vbif->idx), xin_id);
	} else {
		rc = 0;
		DRM_DEBUG_ATOMIC("%s client %d is halted\n",
				dpu_vbif_name(vbif->idx), xin_id);
	}

	return rc;
}

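/*
 * Note on the wait above: xin_halt_timeout from the catalog is expressed in
 * microseconds (it is fed to ktime_add_us()), and the loop re-reads the halt
 * status via get_halt_ctrl() roughly every 0.5-1 ms. One extra read is done
 * after the deadline expires so a halt that lands right on the timeout
 * boundary is not misreported as -ETIMEDOUT.
 */
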
/**
 * _dpu_vbif_apply_dynamic_ot_limit - determine OT based on usecase parameters
 * @vbif:	Pointer to hardware vbif driver
 * @ot_lim:	Pointer to OT limit to be modified
 * @params:	Pointer to usecase parameters
 */
static void _dpu_vbif_apply_dynamic_ot_limit(struct dpu_hw_vbif *vbif,
		u32 *ot_lim, struct dpu_vbif_set_ot_params *params)
{
	u64 pps;
	const struct dpu_vbif_dynamic_ot_tbl *tbl;
	u32 i;

	if (!vbif || !(vbif->cap->features & BIT(DPU_VBIF_QOS_OTLIM)))
		return;

	/* Dynamic OT setting done only for WFD */
	if (!params->is_wfd)
		return;

	pps = params->frame_rate;
	pps *= params->width;
	pps *= params->height;

	tbl = params->rd ? &vbif->cap->dynamic_ot_rd_tbl :
			&vbif->cap->dynamic_ot_wr_tbl;

	for (i = 0; i < tbl->count; i++) {
		if (pps <= tbl->cfg[i].pps) {
			*ot_lim = tbl->cfg[i].ot_limit;
			break;
		}
	}

	DRM_DEBUG_ATOMIC("%s xin:%d w:%d h:%d fps:%d pps:%llu ot:%u\n",
			dpu_vbif_name(vbif->idx), params->xin_id,
			params->width, params->height, params->frame_rate,
			pps, *ot_lim);
}

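/*
 * The dynamic OT tables consulted above are keyed by pixel throughput:
 * pps is frame_rate * width * height, and the first entry whose pps
 * threshold is >= the computed value supplies the OT limit. Entries are
 * therefore expected to be sorted by ascending pps; if no entry matches,
 * the default limit chosen by the caller is left untouched.
 */
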
/**
 * _dpu_vbif_get_ot_limit - get OT based on usecase & configuration parameters
 * @vbif:	Pointer to hardware vbif driver
 * @params:	Pointer to usecase parameters
 * @return:	OT limit
 */
static u32 _dpu_vbif_get_ot_limit(struct dpu_hw_vbif *vbif,
	struct dpu_vbif_set_ot_params *params)
{
	u32 ot_lim = 0;
	u32 val;

	if (!vbif || !vbif->cap) {
		DPU_ERROR("invalid arguments vbif %d\n", vbif != NULL);
		return -EINVAL;
	}

	if (vbif->cap->default_ot_wr_limit && !params->rd)
		ot_lim = vbif->cap->default_ot_wr_limit;
	else if (vbif->cap->default_ot_rd_limit && params->rd)
		ot_lim = vbif->cap->default_ot_rd_limit;

	/*
	 * If default ot is not set from dt/catalog,
	 * then do not configure it.
	 */
	if (ot_lim == 0)
		goto exit;

	/* Modify the limits if the target and the use case requires it */
	_dpu_vbif_apply_dynamic_ot_limit(vbif, &ot_lim, params);

	if (vbif && vbif->ops.get_limit_conf) {
		val = vbif->ops.get_limit_conf(vbif,
				params->xin_id, params->rd);
		if (val == ot_lim)
			ot_lim = 0;
	}

exit:
	DRM_DEBUG_ATOMIC("%s xin:%d ot_lim:%d\n",
			dpu_vbif_name(vbif->idx), params->xin_id, ot_lim);
	return ot_lim;
}

/**
 * dpu_vbif_set_ot_limit - set OT based on usecase & configuration parameters
 * @dpu_kms:	DPU handler
 * @params:	Pointer to usecase parameters
 *
 * Note this function would block waiting for bus halt.
 */
void dpu_vbif_set_ot_limit(struct dpu_kms *dpu_kms,
		struct dpu_vbif_set_ot_params *params)
{
	struct dpu_hw_vbif *vbif;
	u32 ot_lim;
	int ret;

	vbif = dpu_get_vbif(dpu_kms, params->vbif_idx);
	if (!vbif) {
		DRM_DEBUG_ATOMIC("invalid arguments vbif %d\n", vbif != NULL);
		return;
	}

	if (!vbif->ops.set_limit_conf || !vbif->ops.set_halt_ctrl)
		return;

	/* set write_gather_en for all write clients */
	if (vbif->ops.set_write_gather_en && !params->rd)
		vbif->ops.set_write_gather_en(vbif, params->xin_id);

	ot_lim = _dpu_vbif_get_ot_limit(vbif, params) & 0xFF;

	if (ot_lim == 0)
		return;

	trace_dpu_perf_set_ot(params->num, params->xin_id, ot_lim,
		params->vbif_idx);

	vbif->ops.set_limit_conf(vbif, params->xin_id, params->rd, ot_lim);

	vbif->ops.set_halt_ctrl(vbif, params->xin_id, true);

	ret = _dpu_vbif_wait_for_xin_halt(vbif, params->xin_id);
	if (ret)
		trace_dpu_vbif_wait_xin_halt_fail(vbif->idx, params->xin_id);

	vbif->ops.set_halt_ctrl(vbif, params->xin_id, false);
}

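/*
 * Illustrative only (not part of this file): a caller elsewhere in the
 * driver fills struct dpu_vbif_set_ot_params from the current plane or
 * writeback configuration and then invokes dpu_vbif_set_ot_limit(),
 * roughly along these lines (the field values below are hypothetical):
 *
 *	struct dpu_vbif_set_ot_params ot_params = {
 *		.xin_id     = 0,	// VBIF client port of the pipe
 *		.num        = 0,	// client instance number (used for tracing)
 *		.width      = 1920,
 *		.height     = 1080,
 *		.frame_rate = 60,
 *		.rd         = true,	// true for read clients
 *		.is_wfd     = false,
 *		.vbif_idx   = VBIF_RT,
 *	};
 *	dpu_vbif_set_ot_limit(dpu_kms, &ot_params);
 *
 * The function programs the new limit, halts the client, waits for the halt
 * to take effect, and then releases the halt again.
 */
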
/**
 * dpu_vbif_set_qos_remap - set QoS priority level remap
 * @dpu_kms:	DPU handler
 * @params:	Pointer to QoS configuration parameters
 */
void dpu_vbif_set_qos_remap(struct dpu_kms *dpu_kms,
		struct dpu_vbif_set_qos_params *params)
{
	struct dpu_hw_vbif *vbif;
	const struct dpu_vbif_qos_tbl *qos_tbl;
	int i;

	if (!params) {
		DPU_ERROR("invalid arguments\n");
		return;
	}

	vbif = dpu_get_vbif(dpu_kms, params->vbif_idx);

	if (!vbif || !vbif->cap) {
		DPU_ERROR("invalid vbif %d\n", params->vbif_idx);
		return;
	}

	if (!vbif->ops.set_qos_remap) {
		DRM_DEBUG_ATOMIC("qos remap not supported\n");
		return;
	}

	qos_tbl = params->is_rt ? &vbif->cap->qos_rt_tbl :
			&vbif->cap->qos_nrt_tbl;

	if (!qos_tbl->npriority_lvl || !qos_tbl->priority_lvl) {
		DRM_DEBUG_ATOMIC("qos tbl not defined\n");
		return;
	}

	for (i = 0; i < qos_tbl->npriority_lvl; i++) {
		DRM_DEBUG_ATOMIC("%s xin:%d lvl:%d/%d\n",
				dpu_vbif_name(params->vbif_idx), params->xin_id, i,
				qos_tbl->priority_lvl[i]);
		vbif->ops.set_qos_remap(vbif, params->xin_id, i,
				qos_tbl->priority_lvl[i]);
	}
}

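/*
 * The remap programmed above is a per-xin lookup: priority level i is
 * remapped to qos_tbl->priority_lvl[i], with the table taken from either
 * the real-time or the non-real-time set in the catalog, depending on
 * params->is_rt.
 */
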
/**
 * dpu_vbif_clear_errors - clear any vbif errors
 * @dpu_kms:	DPU handler
 */
void dpu_vbif_clear_errors(struct dpu_kms *dpu_kms)
{
	struct dpu_hw_vbif *vbif;
	u32 i, pnd, src;

	for (i = 0; i < ARRAY_SIZE(dpu_kms->hw_vbif); i++) {
		vbif = dpu_kms->hw_vbif[i];
		if (vbif && vbif->ops.clear_errors) {
			vbif->ops.clear_errors(vbif, &pnd, &src);
			if (pnd || src) {
				DRM_DEBUG_KMS("%s: pnd 0x%X, src 0x%X\n",
					      dpu_vbif_name(vbif->idx), pnd, src);
			}
		}
	}
}

/**
 * dpu_vbif_init_memtypes - initialize xin memory types for vbif
 * @dpu_kms:	DPU handler
 */
void dpu_vbif_init_memtypes(struct dpu_kms *dpu_kms)
{
	struct dpu_hw_vbif *vbif;
	int i, j;

	for (i = 0; i < ARRAY_SIZE(dpu_kms->hw_vbif); i++) {
		vbif = dpu_kms->hw_vbif[i];
		if (vbif && vbif->cap && vbif->ops.set_mem_type) {
			for (j = 0; j < vbif->cap->memtype_count; j++)
				vbif->ops.set_mem_type(
						vbif, j, vbif->cap->memtype[j]);
		}
	}
}

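/*
 * Debugfs support: dpu_debugfs_vbif_init() below mirrors the catalog data
 * under <debugfs_root>/vbif/<vbif id>/, exposing "features",
 * "xin_halt_timeout", "default_rd_ot_limit", "default_wr_ot_limit" and one
 * "dynamic_ot_{rd,wr}_<n>_{pps,ot_limit}" pair per dynamic OT table entry.
 * Only "features" is writable (0600); the remaining files are read-only
 * (0400).
 */
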
#ifdef CONFIG_DEBUG_FS

void dpu_debugfs_vbif_init(struct dpu_kms *dpu_kms, struct dentry *debugfs_root)
{
	char vbif_name[32];
	struct dentry *entry, *debugfs_vbif;
	int i, j;

	entry = debugfs_create_dir("vbif", debugfs_root);

	for (i = 0; i < dpu_kms->catalog->vbif_count; i++) {
		const struct dpu_vbif_cfg *vbif = &dpu_kms->catalog->vbif[i];

		snprintf(vbif_name, sizeof(vbif_name), "%d", vbif->id);

		debugfs_vbif = debugfs_create_dir(vbif_name, entry);

		debugfs_create_u32("features", 0600, debugfs_vbif,
			(u32 *)&vbif->features);

		debugfs_create_u32("xin_halt_timeout", 0400, debugfs_vbif,
			(u32 *)&vbif->xin_halt_timeout);

		debugfs_create_u32("default_rd_ot_limit", 0400, debugfs_vbif,
			(u32 *)&vbif->default_ot_rd_limit);

		debugfs_create_u32("default_wr_ot_limit", 0400, debugfs_vbif,
			(u32 *)&vbif->default_ot_wr_limit);

		for (j = 0; j < vbif->dynamic_ot_rd_tbl.count; j++) {
			const struct dpu_vbif_dynamic_ot_cfg *cfg =
					&vbif->dynamic_ot_rd_tbl.cfg[j];

			snprintf(vbif_name, sizeof(vbif_name),
					"dynamic_ot_rd_%d_pps", j);
			debugfs_create_u64(vbif_name, 0400, debugfs_vbif,
					(u64 *)&cfg->pps);
			snprintf(vbif_name, sizeof(vbif_name),
					"dynamic_ot_rd_%d_ot_limit", j);
			debugfs_create_u32(vbif_name, 0400, debugfs_vbif,
					(u32 *)&cfg->ot_limit);
		}

		for (j = 0; j < vbif->dynamic_ot_wr_tbl.count; j++) {
			const struct dpu_vbif_dynamic_ot_cfg *cfg =
					&vbif->dynamic_ot_wr_tbl.cfg[j];

			snprintf(vbif_name, sizeof(vbif_name),
					"dynamic_ot_wr_%d_pps", j);
			debugfs_create_u64(vbif_name, 0400, debugfs_vbif,
					(u64 *)&cfg->pps);
			snprintf(vbif_name, sizeof(vbif_name),
					"dynamic_ot_wr_%d_ot_limit", j);
			debugfs_create_u32(vbif_name, 0400, debugfs_vbif,
					(u32 *)&cfg->ot_limit);
		}
	}
}
#endif