// SPDX-License-Identifier: MIT
/*
 * Copyright © 2019 Intel Corporation
 */

#include <drm/drm_atomic_state_helper.h>

#include "intel_atomic.h"
#include "intel_bw.h"
#include "intel_cdclk.h"
#include "intel_display_types.h"
#include "intel_pm.h"
#include "intel_sideband.h"

/* Parameters for Qclk Geyserville (QGV) */
struct intel_qgv_point {
	u16 dclk, t_rp, t_rdpre, t_rc, t_ras, t_rcd;
};

struct intel_qgv_info {
	struct intel_qgv_point points[I915_NUM_QGV_POINTS];
	u8 num_points;
	u8 t_bl;
};

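/*
 * Read the DRAM timing parameters for a single QGV point from the
 * PCode mailbox. The two reply dwords are unpacked into dclk and the
 * tRP/tRCD/tRDPRE/tRAS fields; tRC is not reported directly and is
 * derived as tRP + tRAS.
 */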
static int icl_pcode_read_qgv_point_info(struct drm_i915_private *dev_priv,
					 struct intel_qgv_point *sp,
					 int point)
{
	u32 val = 0, val2 = 0;
	int ret;

	ret = sandybridge_pcode_read(dev_priv,
				     ICL_PCODE_MEM_SUBSYSYSTEM_INFO |
				     ICL_PCODE_MEM_SS_READ_QGV_POINT_INFO(point),
				     &val, &val2);
	if (ret)
		return ret;

	sp->dclk = val & 0xffff;
	sp->t_rp = (val & 0xff0000) >> 16;
	sp->t_rcd = (val & 0xff000000) >> 24;

	sp->t_rdpre = val2 & 0xff;
	sp->t_ras = (val2 & 0xff00) >> 8;

	sp->t_rc = sp->t_rp + sp->t_ras;

	return 0;
}

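/*
 * Ask PCode to mask off the QGV points in @points_mask so that the
 * memory subsystem no longer uses them.
 */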
int icl_pcode_restrict_qgv_points(struct drm_i915_private *dev_priv,
				  u32 points_mask)
{
	int ret;

	/* bspec says to keep retrying for at least 1 ms */
	ret = skl_pcode_request(dev_priv, ICL_PCODE_SAGV_DE_MEM_SS_CONFIG,
				points_mask,
				ICL_PCODE_POINTS_RESTRICTED_MASK,
				ICL_PCODE_POINTS_RESTRICTED,
				1);

	if (ret < 0) {
		drm_err(&dev_priv->drm, "Failed to disable qgv points (%d)\n", ret);
		return ret;
	}

	return 0;
}

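/*
 * Populate @qi with all QGV points reported by PCode. The burst length
 * (t_bl) is not reported by PCode and is instead chosen based on the
 * display version and DRAM type.
 */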
static int icl_get_qgv_points(struct drm_i915_private *dev_priv,
			      struct intel_qgv_info *qi)
{
	const struct dram_info *dram_info = &dev_priv->dram_info;
	int i, ret;

	qi->num_points = dram_info->num_qgv_points;

	if (DISPLAY_VER(dev_priv) == 12)
		switch (dram_info->type) {
		case INTEL_DRAM_DDR4:
			qi->t_bl = 4;
			break;
		case INTEL_DRAM_DDR5:
			qi->t_bl = 8;
			break;
		default:
			qi->t_bl = 16;
			break;
		}
	else if (DISPLAY_VER(dev_priv) == 11)
		qi->t_bl = dev_priv->dram_info.type == INTEL_DRAM_DDR4 ? 4 : 8;

	if (drm_WARN_ON(&dev_priv->drm,
			qi->num_points > ARRAY_SIZE(qi->points)))
		qi->num_points = ARRAY_SIZE(qi->points);

	for (i = 0; i < qi->num_points; i++) {
		struct intel_qgv_point *sp = &qi->points[i];

		ret = icl_pcode_read_qgv_point_info(dev_priv, sp, i);
		if (ret)
			return ret;

		drm_dbg_kms(&dev_priv->drm,
			    "QGV %d: DCLK=%d tRP=%d tRDPRE=%d tRAS=%d tRCD=%d tRC=%d\n",
			    i, sp->dclk, sp->t_rp, sp->t_rdpre, sp->t_ras,
			    sp->t_rcd, sp->t_rc);
	}

	return 0;
}

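/*
 * Convert a bandwidth expressed as the num/den ratio of the DRAM clock
 * into the units used by the deratedbw tables; dclk comes from PCode
 * in multiples of 16.666 MHz (100/6), hence the scaling below.
 */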
static int icl_calc_bw(int dclk, int num, int den)
{
	/* multiples of 16.666MHz (100/6) */
	return DIV_ROUND_CLOSEST(num * dclk * 100, den * 6);
}

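/* Highest DRAM clock among all QGV points, i.e. the fastest SAGV point. */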
static int icl_sagv_max_dclk(const struct intel_qgv_info *qi)
{
	u16 dclk = 0;
	int i;

	for (i = 0; i < qi->num_points; i++)
		dclk = max(dclk, qi->points[i].dclk);

	return dclk;
}

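/*
 * Per-platform system agent parameters: display engine burst length
 * (deburst), programmable display bandwidth limit in GB/s
 * (deprogbwlimit) and the number of display RT IDs (displayrtids).
 */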
struct intel_sa_info {
	u16 displayrtids;
	u8 deburst, deprogbwlimit;
};

static const struct intel_sa_info icl_sa_info = {
	.deburst = 8,
	.deprogbwlimit = 25, /* GB/s */
	.displayrtids = 128,
};

static const struct intel_sa_info tgl_sa_info = {
	.deburst = 16,
	.deprogbwlimit = 34, /* GB/s */
	.displayrtids = 256,
};

static const struct intel_sa_info rkl_sa_info = {
	.deburst = 16,
	.deprogbwlimit = 20, /* GB/s */
	.displayrtids = 128,
};

static const struct intel_sa_info adls_sa_info = {
	.deburst = 16,
	.deprogbwlimit = 38, /* GB/s */
	.displayrtids = 256,
};

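/*
 * Build the max_bw[] tables: for each deinterleave step compute the
 * number of planes it can sustain and, for every QGV point, the
 * bandwidth derived from the DRAM timings, derated to 90% and clamped
 * to the smaller of the SA limit and 60% of the peak DRAM bandwidth.
 */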
static int icl_get_bw_info(struct drm_i915_private *dev_priv, const struct intel_sa_info *sa)
{
	struct intel_qgv_info qi = {};
	bool is_y_tile = true; /* assume y tile may be used */
	int num_channels = max_t(u8, 1, dev_priv->dram_info.num_channels);
	int deinterleave;
	int ipqdepth, ipqdepthpch;
	int dclk_max;
	int maxdebw;
	int i, ret;

	ret = icl_get_qgv_points(dev_priv, &qi);
	if (ret) {
		drm_dbg_kms(&dev_priv->drm,
			    "Failed to get memory subsystem information, ignoring bandwidth limits");
		return ret;
	}

	deinterleave = DIV_ROUND_UP(num_channels, is_y_tile ? 4 : 2);
	dclk_max = icl_sagv_max_dclk(&qi);

	ipqdepthpch = 16;

	maxdebw = min(sa->deprogbwlimit * 1000,
		      icl_calc_bw(dclk_max, 16, 1) * 6 / 10); /* 60% */
	ipqdepth = min(ipqdepthpch, sa->displayrtids / num_channels);

	for (i = 0; i < ARRAY_SIZE(dev_priv->max_bw); i++) {
		struct intel_bw_info *bi = &dev_priv->max_bw[i];
		int clpchgroup;
		int j;

		clpchgroup = (sa->deburst * deinterleave / num_channels) << i;
		bi->num_planes = (ipqdepth - clpchgroup) / clpchgroup + 1;

		bi->num_qgv_points = qi.num_points;

		for (j = 0; j < qi.num_points; j++) {
			const struct intel_qgv_point *sp = &qi.points[j];
			int ct, bw;

			/*
			 * Max row cycle time
			 *
			 * FIXME what is the logic behind the
			 * assumed burst length?
			 */
			ct = max_t(int, sp->t_rc, sp->t_rp + sp->t_rcd +
				   (clpchgroup - 1) * qi.t_bl + sp->t_rdpre);
			bw = icl_calc_bw(sp->dclk, clpchgroup * 32 * num_channels, ct);

			bi->deratedbw[j] = min(maxdebw,
					       bw * 9 / 10); /* 90% */

			drm_dbg_kms(&dev_priv->drm,
				    "BW%d / QGV %d: num_planes=%d deratedbw=%u\n",
				    i, j, bi->num_planes, bi->deratedbw[j]);
		}

		if (bi->num_planes == 1)
			break;
	}

	/*
	 * If SAGV is disabled in BIOS, we always get one QGV point,
	 * and we can't send PCode commands to restrict it as they
	 * would fail and be pointless anyway.
	 */
	if (qi.num_points == 1)
		dev_priv->sagv_status = I915_SAGV_NOT_CONTROLLED;
	else
		dev_priv->sagv_status = I915_SAGV_ENABLED;

	return 0;
}

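/*
 * Look up the derated bandwidth for a given QGV point and plane count.
 * Returns UINT_MAX when PCode hides the point and 0 when no max_bw[]
 * entry covers the requested number of planes.
 */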
static unsigned int icl_max_bw(struct drm_i915_private *dev_priv,
			       int num_planes, int qgv_point)
{
	int i;

	/*
	 * Let's return max bw for 0 planes
	 */
	num_planes = max(1, num_planes);

	for (i = 0; i < ARRAY_SIZE(dev_priv->max_bw); i++) {
		const struct intel_bw_info *bi =
			&dev_priv->max_bw[i];

		/*
		 * Pcode will not expose all QGV points when
		 * SAGV is forced to off/min/med/max.
		 */
		if (qgv_point >= bi->num_qgv_points)
			return UINT_MAX;

		if (num_planes >= bi->num_planes)
			return bi->deratedbw[qgv_point];
	}

	return 0;
}

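/* Pick the per-platform SA parameters and build the bandwidth tables. */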
void intel_bw_init_hw(struct drm_i915_private *dev_priv)
{
	if (!HAS_DISPLAY(dev_priv))
		return;

	if (IS_ALDERLAKE_S(dev_priv) || IS_ALDERLAKE_P(dev_priv))
		icl_get_bw_info(dev_priv, &adls_sa_info);
	else if (IS_ROCKETLAKE(dev_priv))
		icl_get_bw_info(dev_priv, &rkl_sa_info);
	else if (DISPLAY_VER(dev_priv) == 12)
		icl_get_bw_info(dev_priv, &tgl_sa_info);
	else if (DISPLAY_VER(dev_priv) == 11)
		icl_get_bw_info(dev_priv, &icl_sa_info);
}

static unsigned int intel_bw_crtc_num_active_planes(const struct intel_crtc_state *crtc_state)
{
	/*
	 * We assume cursors are small enough
	 * to not cause bandwidth problems.
	 */
	return hweight8(crtc_state->active_planes & ~BIT(PLANE_CURSOR));
}

static unsigned int intel_bw_crtc_data_rate(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	unsigned int data_rate = 0;
	enum plane_id plane_id;

	for_each_plane_id_on_crtc(crtc, plane_id) {
		/*
		 * We assume cursors are small enough
		 * to not cause bandwidth problems.
		 */
		if (plane_id == PLANE_CURSOR)
			continue;

		data_rate += crtc_state->data_rate[plane_id];
	}

	return data_rate;
}

void intel_bw_crtc_update(struct intel_bw_state *bw_state,
			  const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *i915 = to_i915(crtc->base.dev);

	bw_state->data_rate[crtc->pipe] =
		intel_bw_crtc_data_rate(crtc_state);
	bw_state->num_active_planes[crtc->pipe] =
		intel_bw_crtc_num_active_planes(crtc_state);

	drm_dbg_kms(&i915->drm, "pipe %c data rate %u num active planes %u\n",
		    pipe_name(crtc->pipe),
		    bw_state->data_rate[crtc->pipe],
		    bw_state->num_active_planes[crtc->pipe]);
}

static unsigned int intel_bw_num_active_planes(struct drm_i915_private *dev_priv,
					       const struct intel_bw_state *bw_state)
{
	unsigned int num_active_planes = 0;
	enum pipe pipe;

	for_each_pipe(dev_priv, pipe)
		num_active_planes += bw_state->num_active_planes[pipe];

	return num_active_planes;
}

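/*
 * Total data rate summed over all pipes; display version 13+ pads the
 * total by 5% when VT-d is active.
 */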
static unsigned int intel_bw_data_rate(struct drm_i915_private *dev_priv,
				       const struct intel_bw_state *bw_state)
{
	unsigned int data_rate = 0;
	enum pipe pipe;

	for_each_pipe(dev_priv, pipe)
		data_rate += bw_state->data_rate[pipe];

	if (DISPLAY_VER(dev_priv) >= 13 && intel_vtd_active())
		data_rate = data_rate * 105 / 100;

	return data_rate;
}

struct intel_bw_state *
intel_atomic_get_old_bw_state(struct intel_atomic_state *state)
{
	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
	struct intel_global_state *bw_state;

	bw_state = intel_atomic_get_old_global_obj_state(state, &dev_priv->bw_obj);

	return to_intel_bw_state(bw_state);
}

struct intel_bw_state *
intel_atomic_get_new_bw_state(struct intel_atomic_state *state)
{
	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
	struct intel_global_state *bw_state;

	bw_state = intel_atomic_get_new_global_obj_state(state, &dev_priv->bw_obj);

	return to_intel_bw_state(bw_state);
}

struct intel_bw_state *
intel_atomic_get_bw_state(struct intel_atomic_state *state)
{
	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
	struct intel_global_state *bw_state;

	bw_state = intel_atomic_get_global_obj_state(state, &dev_priv->bw_obj);
	if (IS_ERR(bw_state))
		return ERR_CAST(bw_state);

	return to_intel_bw_state(bw_state);
}

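/*
 * Derive the minimum CDCLK from the per-dbuf-slice bandwidth: sum up
 * the data rate each slice carries and divide by 64, per the empirical
 * 64 * CDCLK limit described below.
 */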
int skl_bw_calc_min_cdclk(struct intel_atomic_state *state)
{
	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
	struct intel_bw_state *new_bw_state = NULL;
	struct intel_bw_state *old_bw_state = NULL;
	const struct intel_crtc_state *crtc_state;
	struct intel_crtc *crtc;
	int max_bw = 0;
	enum pipe pipe;
	int i;

	for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
		enum plane_id plane_id;
		struct intel_dbuf_bw *crtc_bw;

		new_bw_state = intel_atomic_get_bw_state(state);
		if (IS_ERR(new_bw_state))
			return PTR_ERR(new_bw_state);

		old_bw_state = intel_atomic_get_old_bw_state(state);

		crtc_bw = &new_bw_state->dbuf_bw[crtc->pipe];

		memset(&crtc_bw->used_bw, 0, sizeof(crtc_bw->used_bw));

		if (!crtc_state->hw.active)
			continue;

		for_each_plane_id_on_crtc(crtc, plane_id) {
			const struct skl_ddb_entry *plane_alloc =
				&crtc_state->wm.skl.plane_ddb_y[plane_id];
			const struct skl_ddb_entry *uv_plane_alloc =
				&crtc_state->wm.skl.plane_ddb_uv[plane_id];
			unsigned int data_rate = crtc_state->data_rate[plane_id];
			unsigned int dbuf_mask = 0;
			enum dbuf_slice slice;

			dbuf_mask |= skl_ddb_dbuf_slice_mask(dev_priv, plane_alloc);
			dbuf_mask |= skl_ddb_dbuf_slice_mask(dev_priv, uv_plane_alloc);

			/*
			 * FIXME: To calculate this more properly we probably
			 * need to split the per-plane data_rate into
			 * data_rate_y and data_rate_uv for multiplanar
			 * formats, so that it doesn't get accounted twice if
			 * the two color planes reside on different slices.
			 * However, for pre-icl this works anyway because we
			 * have only a single slice, and for icl+ the uv
			 * plane has a non-zero data rate.
			 * So in the worst case these calculations are a bit
			 * pessimistic, which shouldn't pose any significant
			 * problem.
			 */
			for_each_dbuf_slice_in_mask(dev_priv, slice, dbuf_mask)
				crtc_bw->used_bw[slice] += data_rate;
		}
	}

	if (!old_bw_state)
		return 0;

	for_each_pipe(dev_priv, pipe) {
		struct intel_dbuf_bw *crtc_bw;
		enum dbuf_slice slice;

		crtc_bw = &new_bw_state->dbuf_bw[pipe];

		for_each_dbuf_slice(dev_priv, slice) {
			/*
			 * Current experimental observations show that,
			 * contrary to BSpec, we get underruns once the total
			 * bandwidth across slices exceeds 64 * CDCLK.
			 * As a temporary measure, in order not to keep CDCLK
			 * bumped up all the time, we calculate CDCLK from the
			 * overall bandwidth consumed by the slices using this
			 * formula.
			 */
			max_bw += crtc_bw->used_bw[slice];
		}
	}

	new_bw_state->min_cdclk = max_bw / 64;

	if (new_bw_state->min_cdclk != old_bw_state->min_cdclk) {
		int ret = intel_atomic_lock_global_state(&new_bw_state->base);

		if (ret)
			return ret;
	}

	return 0;
}

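/*
 * Variant that derives min_cdclk from the cdclk state's per-pipe
 * minimums rather than from the dbuf slice bandwidth.
 */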
int intel_bw_calc_min_cdclk(struct intel_atomic_state *state)
{
	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
	struct intel_bw_state *new_bw_state = NULL;
	struct intel_bw_state *old_bw_state = NULL;
	const struct intel_crtc_state *crtc_state;
	struct intel_crtc *crtc;
	int min_cdclk = 0;
	enum pipe pipe;
	int i;

	for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
		new_bw_state = intel_atomic_get_bw_state(state);
		if (IS_ERR(new_bw_state))
			return PTR_ERR(new_bw_state);

		old_bw_state = intel_atomic_get_old_bw_state(state);
	}

	if (!old_bw_state)
		return 0;

	for_each_pipe(dev_priv, pipe) {
		struct intel_cdclk_state *cdclk_state;

		cdclk_state = intel_atomic_get_new_cdclk_state(state);
		if (!cdclk_state)
			return 0;

		min_cdclk = max(cdclk_state->min_cdclk[pipe], min_cdclk);
	}

	new_bw_state->min_cdclk = min_cdclk;

	if (new_bw_state->min_cdclk != old_bw_state->min_cdclk) {
		int ret = intel_atomic_lock_global_state(&new_bw_state->base);

		if (ret)
			return ret;
	}

	return 0;
}

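/*
 * Validate the memory bandwidth required by the new display
 * configuration: recompute the per-pipe data rates, find which QGV
 * points can sustain the total, and record the points that PCode must
 * mask off. Fails with -EINVAL if no QGV point provides enough
 * bandwidth.
 */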
int intel_bw_atomic_check(struct intel_atomic_state *state)
{
	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
	struct intel_crtc_state *new_crtc_state, *old_crtc_state;
	struct intel_bw_state *new_bw_state = NULL;
	const struct intel_bw_state *old_bw_state = NULL;
	unsigned int data_rate;
	unsigned int num_active_planes;
	struct intel_crtc *crtc;
	int i, ret;
	u32 allowed_points = 0;
	unsigned int max_bw_point = 0, max_bw = 0;
	unsigned int num_qgv_points = dev_priv->max_bw[0].num_qgv_points;
	u32 mask = (1 << num_qgv_points) - 1;

	/* FIXME earlier gens need some checks too */
	if (DISPLAY_VER(dev_priv) < 11)
		return 0;

	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
					    new_crtc_state, i) {
		unsigned int old_data_rate =
			intel_bw_crtc_data_rate(old_crtc_state);
		unsigned int new_data_rate =
			intel_bw_crtc_data_rate(new_crtc_state);
		unsigned int old_active_planes =
			intel_bw_crtc_num_active_planes(old_crtc_state);
		unsigned int new_active_planes =
			intel_bw_crtc_num_active_planes(new_crtc_state);

		/*
		 * Avoid locking the bw state when
		 * nothing significant has changed.
		 */
		if (old_data_rate == new_data_rate &&
		    old_active_planes == new_active_planes)
			continue;

		new_bw_state = intel_atomic_get_bw_state(state);
		if (IS_ERR(new_bw_state))
			return PTR_ERR(new_bw_state);

		new_bw_state->data_rate[crtc->pipe] = new_data_rate;
		new_bw_state->num_active_planes[crtc->pipe] = new_active_planes;

		drm_dbg_kms(&dev_priv->drm,
			    "pipe %c data rate %u num active planes %u\n",
			    pipe_name(crtc->pipe),
			    new_bw_state->data_rate[crtc->pipe],
			    new_bw_state->num_active_planes[crtc->pipe]);
	}

	if (!new_bw_state)
		return 0;

	ret = intel_atomic_lock_global_state(&new_bw_state->base);
	if (ret)
		return ret;

	data_rate = intel_bw_data_rate(dev_priv, new_bw_state);
	data_rate = DIV_ROUND_UP(data_rate, 1000);

	num_active_planes = intel_bw_num_active_planes(dev_priv, new_bw_state);

	for (i = 0; i < num_qgv_points; i++) {
		unsigned int max_data_rate;

		max_data_rate = icl_max_bw(dev_priv, num_active_planes, i);
		/*
		 * We need to know which QGV point gives us the
		 * maximum bandwidth in order to disable SAGV if we
		 * find that we exceed the SAGV block time with the
		 * watermarks. By that point we already have them, as
		 * they are calculated earlier in intel_atomic_check.
		 */
		if (max_data_rate > max_bw) {
			max_bw_point = i;
			max_bw = max_data_rate;
		}
		if (max_data_rate >= data_rate)
			allowed_points |= BIT(i);
		drm_dbg_kms(&dev_priv->drm, "QGV point %d: max bw %d required %d\n",
			    i, max_data_rate, data_rate);
	}

	/*
	 * BSpec states that we should always have at least one allowed
	 * point left, so if we don't, simply reject the configuration.
	 */
	if (allowed_points == 0) {
		drm_dbg_kms(&dev_priv->drm, "No QGV points provide sufficient memory"
			    " bandwidth %d for display configuration (%d active planes).\n",
			    data_rate, num_active_planes);
		return -EINVAL;
	}

	/*
	 * If we can't enable SAGV due to the increased memory latency
	 * it may cause, leave only the single point with the highest
	 * bandwidth.
	 */
	if (!intel_can_enable_sagv(dev_priv, new_bw_state)) {
		allowed_points = BIT(max_bw_point);
		drm_dbg_kms(&dev_priv->drm, "No SAGV, using single QGV point %d\n",
			    max_bw_point);
	}
	/*
	 * We store the points which need to be masked, as that is what
	 * PCode actually accepts as a parameter.
	 */
	new_bw_state->qgv_points_mask = ~allowed_points & mask;

	old_bw_state = intel_atomic_get_old_bw_state(state);
	/*
	 * If the actual mask has changed, we need to make sure that the
	 * commits are serialized (in case this is a nomodeset, nonblocking
	 * commit).
	 */
	if (new_bw_state->qgv_points_mask != old_bw_state->qgv_points_mask) {
		ret = intel_atomic_serialize_global_state(&new_bw_state->base);
		if (ret)
			return ret;
	}

	return 0;
}

static struct intel_global_state *
intel_bw_duplicate_state(struct intel_global_obj *obj)
{
	struct intel_bw_state *state;

	state = kmemdup(obj->state, sizeof(*state), GFP_KERNEL);
	if (!state)
		return NULL;

	return &state->base;
}

static void intel_bw_destroy_state(struct intel_global_obj *obj,
				   struct intel_global_state *state)
{
	kfree(state);
}

static const struct intel_global_state_funcs intel_bw_funcs = {
	.atomic_duplicate_state = intel_bw_duplicate_state,
	.atomic_destroy_state = intel_bw_destroy_state,
};

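/* Register the global bw state object with the atomic state machinery. */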
int intel_bw_init(struct drm_i915_private *dev_priv)
{
	struct intel_bw_state *state;

	state = kzalloc(sizeof(*state), GFP_KERNEL);
	if (!state)
		return -ENOMEM;

	intel_atomic_global_obj_init(dev_priv, &dev_priv->bw_obj,
				     &state->base, &intel_bw_funcs);

	return 0;
}