Source listing: drivers/gpu/drm/i915/intel_dram.c from the Linux kernel, as rendered by an online source browser. The first copy below corresponds to kernel tag v5.14.15.
  1// SPDX-License-Identifier: MIT
  2/*
  3 * Copyright © 2020 Intel Corporation
  4 */
  5
  6#include "i915_drv.h"
  7#include "intel_dram.h"
  8#include "intel_sideband.h"
  9
/* Decoded properties of a single DIMM. */
struct dram_dimm_info {
	u16 size;		/* total DIMM size, in Gb (see *_get_dimm_size()) */
	u8 width, ranks;	/* DRAM device width (x8/x16/x32) and rank count */
};
 14
/* Decoded properties of one memory channel (up to two DIMM slots: L and S). */
struct dram_channel_info {
	struct dram_dimm_info dimm_l, dimm_s;	/* DIMMs in slot L and slot S */
	u8 ranks;		/* effective rank count for the whole channel */
	bool is_16gb_dimm;	/* any DIMM built from 16Gb DRAM devices */
};
 20
 21#define DRAM_TYPE_STR(type) [INTEL_DRAM_ ## type] = #type
 22
 23static const char *intel_dram_type_str(enum intel_dram_type type)
 24{
 25	static const char * const str[] = {
 26		DRAM_TYPE_STR(UNKNOWN),
 27		DRAM_TYPE_STR(DDR3),
 28		DRAM_TYPE_STR(DDR4),
 29		DRAM_TYPE_STR(LPDDR3),
 30		DRAM_TYPE_STR(LPDDR4),
 31	};
 32
 33	if (type >= ARRAY_SIZE(str))
 34		type = INTEL_DRAM_UNKNOWN;
 35
 36	return str[type];
 37}
 38
 39#undef DRAM_TYPE_STR
 40
/* Number of DRAM devices on the DIMM: each rank is 64 bits wide. */
static int intel_dimm_num_devices(const struct dram_dimm_info *dimm)
{
	/* ?: guards against width == 0 (empty DIMM slot). */
	return dimm->ranks * 64 / (dimm->width ?: 1);
}
 45
/* Returns total Gb for the whole DIMM */
static int skl_get_dimm_size(u16 val)
{
	/* Size field is GB-granular; *8 converts GB to Gb. */
	return (val & SKL_DRAM_SIZE_MASK) * 8;
}
 51
 52static int skl_get_dimm_width(u16 val)
 53{
 54	if (skl_get_dimm_size(val) == 0)
 55		return 0;
 56
 57	switch (val & SKL_DRAM_WIDTH_MASK) {
 58	case SKL_DRAM_WIDTH_X8:
 59	case SKL_DRAM_WIDTH_X16:
 60	case SKL_DRAM_WIDTH_X32:
 61		val = (val & SKL_DRAM_WIDTH_MASK) >> SKL_DRAM_WIDTH_SHIFT;
 62		return 8 << val;
 63	default:
 64		MISSING_CASE(val);
 65		return 0;
 66	}
 67}
 68
/* Decode the rank count from a SKL DIMM register value; 0 if slot empty. */
static int skl_get_dimm_ranks(u16 val)
{
	if (skl_get_dimm_size(val) == 0)
		return 0;

	/* Hardware field holds ranks - 1. */
	val = (val & SKL_DRAM_RANK_MASK) >> SKL_DRAM_RANK_SHIFT;

	return val + 1;
}
 78
/* Returns total Gb for the whole DIMM */
static int cnl_get_dimm_size(u16 val)
{
	/* CNL size field is in half-GB units: *8 for GB->Gb, /2 for granularity. */
	return (val & CNL_DRAM_SIZE_MASK) * 8 / 2;
}
 84
 85static int cnl_get_dimm_width(u16 val)
 86{
 87	if (cnl_get_dimm_size(val) == 0)
 88		return 0;
 89
 90	switch (val & CNL_DRAM_WIDTH_MASK) {
 91	case CNL_DRAM_WIDTH_X8:
 92	case CNL_DRAM_WIDTH_X16:
 93	case CNL_DRAM_WIDTH_X32:
 94		val = (val & CNL_DRAM_WIDTH_MASK) >> CNL_DRAM_WIDTH_SHIFT;
 95		return 8 << val;
 96	default:
 97		MISSING_CASE(val);
 98		return 0;
 99	}
100}
101
102static int cnl_get_dimm_ranks(u16 val)
103{
104	if (cnl_get_dimm_size(val) == 0)
105		return 0;
106
107	val = (val & CNL_DRAM_RANK_MASK) >> CNL_DRAM_RANK_SHIFT;
108
109	return val + 1;
110}
111
/* True when the DIMM is built out of 16Gb DRAM devices. */
static bool
skl_is_16gb_dimm(const struct dram_dimm_info *dimm)
{
	/* Convert total Gb to Gb per DRAM device */
	return dimm->size / (intel_dimm_num_devices(dimm) ?: 1) == 16;
}
118
/*
 * Decode one DIMM's size/width/ranks from the raw register value and log
 * the result. CNL+ (graphics ver 10) uses a different field layout than SKL.
 */
static void
skl_dram_get_dimm_info(struct drm_i915_private *i915,
		       struct dram_dimm_info *dimm,
		       int channel, char dimm_name, u16 val)
{
	if (GRAPHICS_VER(i915) >= 10) {
		dimm->size = cnl_get_dimm_size(val);
		dimm->width = cnl_get_dimm_width(val);
		dimm->ranks = cnl_get_dimm_ranks(val);
	} else {
		dimm->size = skl_get_dimm_size(val);
		dimm->width = skl_get_dimm_width(val);
		dimm->ranks = skl_get_dimm_ranks(val);
	}

	drm_dbg_kms(&i915->drm,
		    "CH%u DIMM %c size: %u Gb, width: X%u, ranks: %u, 16Gb DIMMs: %s\n",
		    channel, dimm_name, dimm->size, dimm->width, dimm->ranks,
		    yesno(skl_is_16gb_dimm(dimm)));
}
139
/*
 * Decode both DIMM slots of one channel and derive the channel-wide rank
 * count and 16Gb-device flag. Returns -EINVAL if the channel is empty.
 */
static int
skl_dram_get_channel_info(struct drm_i915_private *i915,
			  struct dram_channel_info *ch,
			  int channel, u32 val)
{
	/* Low word describes the L slot, high word the S slot. */
	skl_dram_get_dimm_info(i915, &ch->dimm_l,
			       channel, 'L', val & 0xffff);
	skl_dram_get_dimm_info(i915, &ch->dimm_s,
			       channel, 'S', val >> 16);

	if (ch->dimm_l.size == 0 && ch->dimm_s.size == 0) {
		drm_dbg_kms(&i915->drm, "CH%u not populated\n", channel);
		return -EINVAL;
	}

	/* Two single rank DIMMs also give the channel two ranks in total. */
	if (ch->dimm_l.ranks == 2 || ch->dimm_s.ranks == 2)
		ch->ranks = 2;
	else if (ch->dimm_l.ranks == 1 && ch->dimm_s.ranks == 1)
		ch->ranks = 2;
	else
		ch->ranks = 1;

	ch->is_16gb_dimm = skl_is_16gb_dimm(&ch->dimm_l) ||
		skl_is_16gb_dimm(&ch->dimm_s);

	drm_dbg_kms(&i915->drm, "CH%u ranks: %u, 16Gb DIMMs: %s\n",
		    channel, ch->ranks, yesno(ch->is_16gb_dimm));

	return 0;
}
170
/*
 * Memory is symmetric when both channels are identical and, within a
 * channel, either only the L slot is populated or L and S match.
 */
static bool
intel_is_dram_symmetric(const struct dram_channel_info *ch0,
			const struct dram_channel_info *ch1)
{
	return !memcmp(ch0, ch1, sizeof(*ch0)) &&
		(ch0->dimm_s.size == 0 ||
		 !memcmp(&ch0->dimm_l, &ch0->dimm_s, sizeof(ch0->dimm_l)));
}
179
/*
 * Probe both MCHBAR channel registers, count populated channels, and fill
 * in the channel-derived dram_info fields. Returns -EINVAL when no channel
 * or no rank information could be obtained.
 */
static int
skl_dram_get_channels_info(struct drm_i915_private *i915)
{
	struct dram_info *dram_info = &i915->dram_info;
	struct dram_channel_info ch0 = {}, ch1 = {};
	u32 val;
	int ret;

	val = intel_uncore_read(&i915->uncore,
				SKL_MAD_DIMM_CH0_0_0_0_MCHBAR_MCMAIN);
	ret = skl_dram_get_channel_info(i915, &ch0, 0, val);
	if (ret == 0)
		dram_info->num_channels++;

	val = intel_uncore_read(&i915->uncore,
				SKL_MAD_DIMM_CH1_0_0_0_MCHBAR_MCMAIN);
	ret = skl_dram_get_channel_info(i915, &ch1, 1, val);
	if (ret == 0)
		dram_info->num_channels++;

	if (dram_info->num_channels == 0) {
		drm_info(&i915->drm, "Number of memory channels is zero\n");
		return -EINVAL;
	}

	if (ch0.ranks == 0 && ch1.ranks == 0) {
		drm_info(&i915->drm, "couldn't get memory rank information\n");
		return -EINVAL;
	}

	/* The level 0 watermark adjustment is needed if any DIMM uses 16Gb devices. */
	dram_info->wm_lv_0_adjust_needed = ch0.is_16gb_dimm || ch1.is_16gb_dimm;

	dram_info->symmetric_memory = intel_is_dram_symmetric(&ch0, &ch1);

	drm_dbg_kms(&i915->drm, "Memory configuration is symmetric? %s\n",
		    yesno(dram_info->symmetric_memory));

	return 0;
}
219
/* Read the DDR type from the MCHBAR inter-channel register. */
static enum intel_dram_type
skl_get_dram_type(struct drm_i915_private *i915)
{
	u32 val;

	val = intel_uncore_read(&i915->uncore,
				SKL_MAD_INTER_CHANNEL_0_0_0_MCHBAR_MCMAIN);

	switch (val & SKL_DRAM_DDR_TYPE_MASK) {
	case SKL_DRAM_DDR_TYPE_DDR3:
		return INTEL_DRAM_DDR3;
	case SKL_DRAM_DDR_TYPE_DDR4:
		return INTEL_DRAM_DDR4;
	case SKL_DRAM_DDR_TYPE_LPDDR3:
		return INTEL_DRAM_LPDDR3;
	case SKL_DRAM_DDR_TYPE_LPDDR4:
		return INTEL_DRAM_LPDDR4;
	default:
		MISSING_CASE(val);
		return INTEL_DRAM_UNKNOWN;
	}
}
242
/*
 * Top-level DRAM detection for SKL/CNL class hardware: determine DRAM
 * type, per-channel configuration, and sanity-check the memory frequency.
 */
static int
skl_get_dram_info(struct drm_i915_private *i915)
{
	struct dram_info *dram_info = &i915->dram_info;
	u32 mem_freq_khz, val;
	int ret;

	dram_info->type = skl_get_dram_type(i915);
	drm_dbg_kms(&i915->drm, "DRAM type: %s\n",
		    intel_dram_type_str(dram_info->type));

	ret = skl_dram_get_channels_info(i915);
	if (ret)
		return ret;

	val = intel_uncore_read(&i915->uncore,
				SKL_MC_BIOS_DATA_0_0_0_MCHBAR_PCU);
	mem_freq_khz = DIV_ROUND_UP((val & SKL_REQ_DATA_MASK) *
				    SKL_MEMORY_FREQ_MULTIPLIER_HZ, 1000);

	/* A zero product means either the frequency or channel count is missing. */
	if (dram_info->num_channels * mem_freq_khz == 0) {
		drm_info(&i915->drm,
			 "Couldn't get system memory bandwidth\n");
		return -EINVAL;
	}

	return 0;
}
271
/* Returns Gb per DRAM device */
static int bxt_get_dimm_size(u32 val)
{
	switch (val & BXT_DRAM_SIZE_MASK) {
	case BXT_DRAM_SIZE_4GBIT:
		return 4;
	case BXT_DRAM_SIZE_6GBIT:
		return 6;
	case BXT_DRAM_SIZE_8GBIT:
		return 8;
	case BXT_DRAM_SIZE_12GBIT:
		return 12;
	case BXT_DRAM_SIZE_16GBIT:
		return 16;
	default:
		/* Unknown encoding: warn once and treat the DUNIT as empty. */
		MISSING_CASE(val);
		return 0;
	}
}
291
292static int bxt_get_dimm_width(u32 val)
293{
294	if (!bxt_get_dimm_size(val))
295		return 0;
296
297	val = (val & BXT_DRAM_WIDTH_MASK) >> BXT_DRAM_WIDTH_SHIFT;
298
299	return 8 << val;
300}
301
/* Decode the rank count from a BXT DUNIT register; 0 if empty/unknown. */
static int bxt_get_dimm_ranks(u32 val)
{
	if (!bxt_get_dimm_size(val))
		return 0;

	switch (val & BXT_DRAM_RANK_MASK) {
	case BXT_DRAM_RANK_SINGLE:
		return 1;
	case BXT_DRAM_RANK_DUAL:
		return 2;
	default:
		MISSING_CASE(val);
		return 0;
	}
}
317
/* Decode the DRAM type from a BXT DUNIT register; UNKNOWN if empty. */
static enum intel_dram_type bxt_get_dimm_type(u32 val)
{
	if (!bxt_get_dimm_size(val))
		return INTEL_DRAM_UNKNOWN;

	switch (val & BXT_DRAM_TYPE_MASK) {
	case BXT_DRAM_TYPE_DDR3:
		return INTEL_DRAM_DDR3;
	case BXT_DRAM_TYPE_LPDDR3:
		return INTEL_DRAM_LPDDR3;
	case BXT_DRAM_TYPE_DDR4:
		return INTEL_DRAM_DDR4;
	case BXT_DRAM_TYPE_LPDDR4:
		return INTEL_DRAM_LPDDR4;
	default:
		MISSING_CASE(val);
		return INTEL_DRAM_UNKNOWN;
	}
}
337
/* Fill a dram_dimm_info from one BXT DUNIT register value. */
static void bxt_get_dimm_info(struct dram_dimm_info *dimm, u32 val)
{
	/* Width and ranks must be decoded first: the size math uses them. */
	dimm->width = bxt_get_dimm_width(val);
	dimm->ranks = bxt_get_dimm_ranks(val);

	/*
	 * Size in register is Gb per DRAM device. Convert to total
	 * Gb to match the way we report this for non-LP platforms.
	 */
	dimm->size = bxt_get_dimm_size(val) * intel_dimm_num_devices(dimm);
}
349
/*
 * DRAM detection for BXT/GLK: validate memory frequency and active
 * channels, then walk the DUNIT registers to determine per-channel
 * DIMM configuration, overall type, and rank information.
 */
static int bxt_get_dram_info(struct drm_i915_private *i915)
{
	struct dram_info *dram_info = &i915->dram_info;
	u32 dram_channels;
	u32 mem_freq_khz, val;
	u8 num_active_channels, valid_ranks = 0;
	int i;

	val = intel_uncore_read(&i915->uncore, BXT_P_CR_MC_BIOS_REQ_0_0_0);
	mem_freq_khz = DIV_ROUND_UP((val & BXT_REQ_DATA_MASK) *
				    BXT_MEMORY_FREQ_MULTIPLIER_HZ, 1000);

	dram_channels = val & BXT_DRAM_CHANNEL_ACTIVE_MASK;
	num_active_channels = hweight32(dram_channels);

	/* A zero product means either frequency or channel count is missing. */
	if (mem_freq_khz * num_active_channels == 0) {
		drm_info(&i915->drm,
			 "Couldn't get system memory bandwidth\n");
		return -EINVAL;
	}

	/*
	 * Now read each DUNIT8/9/10/11 to check the rank of each dimms.
	 */
	for (i = BXT_D_CR_DRP0_DUNIT_START; i <= BXT_D_CR_DRP0_DUNIT_END; i++) {
		struct dram_dimm_info dimm;
		enum intel_dram_type type;

		val = intel_uncore_read(&i915->uncore, BXT_D_CR_DRP0_DUNIT(i));
		/* All-ones indicates an absent/unpopulated DUNIT. */
		if (val == 0xFFFFFFFF)
			continue;

		dram_info->num_channels++;

		bxt_get_dimm_info(&dimm, val);
		type = bxt_get_dimm_type(val);

		/* All populated DUNITs are expected to report the same type. */
		drm_WARN_ON(&i915->drm, type != INTEL_DRAM_UNKNOWN &&
			    dram_info->type != INTEL_DRAM_UNKNOWN &&
			    dram_info->type != type);

		drm_dbg_kms(&i915->drm,
			    "CH%u DIMM size: %u Gb, width: X%u, ranks: %u, type: %s\n",
			    i - BXT_D_CR_DRP0_DUNIT_START,
			    dimm.size, dimm.width, dimm.ranks,
			    intel_dram_type_str(type));

		/* Remember the first valid rank count seen. */
		if (valid_ranks == 0)
			valid_ranks = dimm.ranks;

		if (type != INTEL_DRAM_UNKNOWN)
			dram_info->type = type;
	}

	if (dram_info->type == INTEL_DRAM_UNKNOWN || valid_ranks == 0) {
		drm_info(&i915->drm, "couldn't get memory information\n");
		return -EINVAL;
	}

	return 0;
}
411
/*
 * Query global memory subsystem info from the pcode mailbox (ICL+).
 * The returned value packs: bits 0-3 DRAM type (encoding differs between
 * graphics ver 12 and ver 11), bits 4-7 channel count, bits 8-11 the
 * number of QGV points.
 */
static int icl_pcode_read_mem_global_info(struct drm_i915_private *dev_priv)
{
	struct dram_info *dram_info = &dev_priv->dram_info;
	u32 val = 0;
	int ret;

	ret = sandybridge_pcode_read(dev_priv,
				     ICL_PCODE_MEM_SUBSYSYSTEM_INFO |
				     ICL_PCODE_MEM_SS_READ_GLOBAL_INFO,
				     &val, NULL);
	if (ret)
		return ret;

	if (GRAPHICS_VER(dev_priv) == 12) {
		switch (val & 0xf) {
		case 0:
			dram_info->type = INTEL_DRAM_DDR4;
			break;
		case 1:
			dram_info->type = INTEL_DRAM_DDR5;
			break;
		case 2:
			dram_info->type = INTEL_DRAM_LPDDR5;
			break;
		case 3:
			dram_info->type = INTEL_DRAM_LPDDR4;
			break;
		case 4:
			dram_info->type = INTEL_DRAM_DDR3;
			break;
		case 5:
			dram_info->type = INTEL_DRAM_LPDDR3;
			break;
		default:
			MISSING_CASE(val & 0xf);
			return -1;
		}
	} else {
		/* Pre-gen12 pcode uses a different type encoding. */
		switch (val & 0xf) {
		case 0:
			dram_info->type = INTEL_DRAM_DDR4;
			break;
		case 1:
			dram_info->type = INTEL_DRAM_DDR3;
			break;
		case 2:
			dram_info->type = INTEL_DRAM_LPDDR3;
			break;
		case 3:
			dram_info->type = INTEL_DRAM_LPDDR4;
			break;
		default:
			MISSING_CASE(val & 0xf);
			return -1;
		}
	}

	dram_info->num_channels = (val & 0xf0) >> 4;
	dram_info->num_qgv_points = (val & 0xf00) >> 8;

	return 0;
}
474
/* Gen11: MCHBAR-based detection first, then pcode global memory info. */
static int gen11_get_dram_info(struct drm_i915_private *i915)
{
	int ret;

	ret = skl_get_dram_info(i915);
	if (ret)
		return ret;

	return icl_pcode_read_mem_global_info(i915);
}
484
485static int gen12_get_dram_info(struct drm_i915_private *i915)
486{
487	/* Always needed for GEN12+ */
488	i915->dram_info.wm_lv_0_adjust_needed = true;
489
490	return icl_pcode_read_mem_global_info(i915);
491}
492
/*
 * Detect the system DRAM configuration and store it in i915->dram_info.
 * Detection failure is non-fatal: the conservative defaults set at the
 * top remain in effect.
 */
void intel_dram_detect(struct drm_i915_private *i915)
{
	struct dram_info *dram_info = &i915->dram_info;
	int ret;

	/*
	 * Assume level 0 watermark latency adjustment is needed until proven
	 * otherwise, this w/a is not needed by bxt/glk.
	 */
	dram_info->wm_lv_0_adjust_needed = !IS_GEN9_LP(i915);

	if (GRAPHICS_VER(i915) < 9 || !HAS_DISPLAY(i915))
		return;

	/* Dispatch to the platform-appropriate detection path. */
	if (GRAPHICS_VER(i915) >= 12)
		ret = gen12_get_dram_info(i915);
	else if (GRAPHICS_VER(i915) >= 11)
		ret = gen11_get_dram_info(i915);
	else if (IS_GEN9_LP(i915))
		ret = bxt_get_dram_info(i915);
	else
		ret = skl_get_dram_info(i915);
	if (ret)
		return;

	drm_dbg_kms(&i915->drm, "DRAM channels: %u\n", dram_info->num_channels);

	drm_dbg_kms(&i915->drm, "Watermark level 0 adjustment needed: %s\n",
		    yesno(dram_info->wm_lv_0_adjust_needed));
}
523
524static u32 gen9_edram_size_mb(struct drm_i915_private *i915, u32 cap)
525{
526	static const u8 ways[8] = { 4, 8, 12, 16, 16, 16, 16, 16 };
527	static const u8 sets[4] = { 1, 1, 2, 2 };
528
529	return EDRAM_NUM_BANKS(cap) *
530		ways[EDRAM_WAYS_IDX(cap)] *
531		sets[EDRAM_SETS_IDX(cap)];
532}
533
/* Detect eDRAM presence and size, storing the result in i915->edram_size_mb. */
void intel_dram_edram_detect(struct drm_i915_private *i915)
{
	u32 edram_cap = 0;

	/* Only HSW/BDW and gen9+ have eDRAM capability reporting. */
	if (!(IS_HASWELL(i915) || IS_BROADWELL(i915) || GRAPHICS_VER(i915) >= 9))
		return;

	edram_cap = __raw_uncore_read32(&i915->uncore, HSW_EDRAM_CAP);

	/* NB: We can't write IDICR yet because we don't have gt funcs set up */

	if (!(edram_cap & EDRAM_ENABLED))
		return;

	/*
	 * The needed capability bits for size calculation are not there with
	 * pre gen9 so return 128MB always.
	 */
	if (GRAPHICS_VER(i915) < 9)
		i915->edram_size_mb = 128;
	else
		i915->edram_size_mb = gen9_edram_size_mb(i915, edram_cap);

	drm_info(&i915->drm, "Found %uMB of eDRAM\n", i915->edram_size_mb);
}
The following is the same file (intel_dram.c) as of kernel tag v5.9, shown for comparison with the v5.14.15 copy above.
  1// SPDX-License-Identifier: MIT
  2/*
  3 * Copyright © 2020 Intel Corporation
  4 */
  5
  6#include "i915_drv.h"
  7#include "intel_dram.h"
 
  8
/* v5.9 variant: size is stored in GB here (u8), not Gb. */
struct dram_dimm_info {
	u8 size, width, ranks;
};

/* Decoded properties of one memory channel (slots L and S). */
struct dram_channel_info {
	struct dram_dimm_info dimm_l, dimm_s;
	u8 ranks;		/* effective rank count for the channel */
	bool is_16gb_dimm;	/* any DIMM built from 16Gb DRAM devices */
};
 18
/* Expands to a designated initializer mapping an enum value to its name. */
#define DRAM_TYPE_STR(type) [INTEL_DRAM_ ## type] = #type

/* Human readable DRAM type name; out-of-range values map to "UNKNOWN". */
static const char *intel_dram_type_str(enum intel_dram_type type)
{
	static const char * const str[] = {
		DRAM_TYPE_STR(UNKNOWN),
		DRAM_TYPE_STR(DDR3),
		DRAM_TYPE_STR(DDR4),
		DRAM_TYPE_STR(LPDDR3),
		DRAM_TYPE_STR(LPDDR4),
	};

	if (type >= ARRAY_SIZE(str))
		type = INTEL_DRAM_UNKNOWN;

	return str[type];
}

#undef DRAM_TYPE_STR

/* Number of DRAM devices on the DIMM: each rank is 64 bits wide. */
static int intel_dimm_num_devices(const struct dram_dimm_info *dimm)
{
	/* ?: guards against width == 0 (empty DIMM slot). */
	return dimm->ranks * 64 / (dimm->width ?: 1);
}
 43
/* Returns total GB for the whole DIMM */
static int skl_get_dimm_size(u16 val)
{
	return val & SKL_DRAM_SIZE_MASK;
}

/* Decode DRAM device width (x8/x16/x32); 0 if slot empty or unknown. */
static int skl_get_dimm_width(u16 val)
{
	if (skl_get_dimm_size(val) == 0)
		return 0;

	switch (val & SKL_DRAM_WIDTH_MASK) {
	case SKL_DRAM_WIDTH_X8:
	case SKL_DRAM_WIDTH_X16:
	case SKL_DRAM_WIDTH_X32:
		/* Field encodes log2(width / 8). */
		val = (val & SKL_DRAM_WIDTH_MASK) >> SKL_DRAM_WIDTH_SHIFT;
		return 8 << val;
	default:
		MISSING_CASE(val);
		return 0;
	}
}

/* Decode rank count; hardware field holds ranks - 1. */
static int skl_get_dimm_ranks(u16 val)
{
	if (skl_get_dimm_size(val) == 0)
		return 0;

	val = (val & SKL_DRAM_RANK_MASK) >> SKL_DRAM_RANK_SHIFT;

	return val + 1;
}

/* Returns total GB for the whole DIMM */
static int cnl_get_dimm_size(u16 val)
{
	/* CNL size field is in half-GB units. */
	return (val & CNL_DRAM_SIZE_MASK) / 2;
}

/* CNL variant of the width decode; same encoding as SKL, different mask. */
static int cnl_get_dimm_width(u16 val)
{
	if (cnl_get_dimm_size(val) == 0)
		return 0;

	switch (val & CNL_DRAM_WIDTH_MASK) {
	case CNL_DRAM_WIDTH_X8:
	case CNL_DRAM_WIDTH_X16:
	case CNL_DRAM_WIDTH_X32:
		val = (val & CNL_DRAM_WIDTH_MASK) >> CNL_DRAM_WIDTH_SHIFT;
		return 8 << val;
	default:
		MISSING_CASE(val);
		return 0;
	}
}

/* CNL variant of the rank decode; field holds ranks - 1. */
static int cnl_get_dimm_ranks(u16 val)
{
	if (cnl_get_dimm_size(val) == 0)
		return 0;

	val = (val & CNL_DRAM_RANK_MASK) >> CNL_DRAM_RANK_SHIFT;

	return val + 1;
}

/* True when the DIMM is built out of 16Gb DRAM devices. */
static bool
skl_is_16gb_dimm(const struct dram_dimm_info *dimm)
{
	/* Convert total GB to Gb per DRAM device */
	return 8 * dimm->size / (intel_dimm_num_devices(dimm) ?: 1) == 16;
}
116
/*
 * Decode one DIMM's size/width/ranks from the raw register value and log
 * the result. CNL+ (gen10) uses a different field layout than SKL.
 */
static void
skl_dram_get_dimm_info(struct drm_i915_private *i915,
		       struct dram_dimm_info *dimm,
		       int channel, char dimm_name, u16 val)
{
	if (INTEL_GEN(i915) >= 10) {
		dimm->size = cnl_get_dimm_size(val);
		dimm->width = cnl_get_dimm_width(val);
		dimm->ranks = cnl_get_dimm_ranks(val);
	} else {
		dimm->size = skl_get_dimm_size(val);
		dimm->width = skl_get_dimm_width(val);
		dimm->ranks = skl_get_dimm_ranks(val);
	}

	drm_dbg_kms(&i915->drm,
		    "CH%u DIMM %c size: %u GB, width: X%u, ranks: %u, 16Gb DIMMs: %s\n",
		    channel, dimm_name, dimm->size, dimm->width, dimm->ranks,
		    yesno(skl_is_16gb_dimm(dimm)));
}

/*
 * Decode both DIMM slots of one channel; -EINVAL if the channel is empty.
 */
static int
skl_dram_get_channel_info(struct drm_i915_private *i915,
			  struct dram_channel_info *ch,
			  int channel, u32 val)
{
	/* Low word describes the L slot, high word the S slot. */
	skl_dram_get_dimm_info(i915, &ch->dimm_l,
			       channel, 'L', val & 0xffff);
	skl_dram_get_dimm_info(i915, &ch->dimm_s,
			       channel, 'S', val >> 16);

	if (ch->dimm_l.size == 0 && ch->dimm_s.size == 0) {
		drm_dbg_kms(&i915->drm, "CH%u not populated\n", channel);
		return -EINVAL;
	}

	/* Two single rank DIMMs also give the channel two ranks in total. */
	if (ch->dimm_l.ranks == 2 || ch->dimm_s.ranks == 2)
		ch->ranks = 2;
	else if (ch->dimm_l.ranks == 1 && ch->dimm_s.ranks == 1)
		ch->ranks = 2;
	else
		ch->ranks = 1;

	ch->is_16gb_dimm = skl_is_16gb_dimm(&ch->dimm_l) ||
		skl_is_16gb_dimm(&ch->dimm_s);

	drm_dbg_kms(&i915->drm, "CH%u ranks: %u, 16Gb DIMMs: %s\n",
		    channel, ch->ranks, yesno(ch->is_16gb_dimm));

	return 0;
}
168
/*
 * Memory is symmetric when both channels are identical and, within a
 * channel, either only the L slot is populated or L and S match.
 */
static bool
intel_is_dram_symmetric(const struct dram_channel_info *ch0,
			const struct dram_channel_info *ch1)
{
	return !memcmp(ch0, ch1, sizeof(*ch0)) &&
		(ch0->dimm_s.size == 0 ||
		 !memcmp(&ch0->dimm_l, &ch0->dimm_s, sizeof(ch0->dimm_l)));
}

/*
 * Probe both MCHBAR channel registers and fill in the channel-derived
 * dram_info fields (v5.9 keeps a global ranks field).
 */
static int
skl_dram_get_channels_info(struct drm_i915_private *i915)
{
	struct dram_info *dram_info = &i915->dram_info;
	struct dram_channel_info ch0 = {}, ch1 = {};
	u32 val;
	int ret;

	val = intel_uncore_read(&i915->uncore,
				SKL_MAD_DIMM_CH0_0_0_0_MCHBAR_MCMAIN);
	ret = skl_dram_get_channel_info(i915, &ch0, 0, val);
	if (ret == 0)
		dram_info->num_channels++;

	val = intel_uncore_read(&i915->uncore,
				SKL_MAD_DIMM_CH1_0_0_0_MCHBAR_MCMAIN);
	ret = skl_dram_get_channel_info(i915, &ch1, 1, val);
	if (ret == 0)
		dram_info->num_channels++;

	if (dram_info->num_channels == 0) {
		drm_info(&i915->drm, "Number of memory channels is zero\n");
		return -EINVAL;
	}

	/*
	 * If any of the channel is single rank channel, worst case output
	 * will be same as if single rank memory, so consider single rank
	 * memory.
	 */
	if (ch0.ranks == 1 || ch1.ranks == 1)
		dram_info->ranks = 1;
	else
		dram_info->ranks = max(ch0.ranks, ch1.ranks);

	if (dram_info->ranks == 0) {
		drm_info(&i915->drm, "couldn't get memory rank information\n");
		return -EINVAL;
	}

	dram_info->is_16gb_dimm = ch0.is_16gb_dimm || ch1.is_16gb_dimm;

	dram_info->symmetric_memory = intel_is_dram_symmetric(&ch0, &ch1);

	drm_dbg_kms(&i915->drm, "Memory configuration is symmetric? %s\n",
		    yesno(dram_info->symmetric_memory));

	return 0;
}
227
/* Read the DDR type from the MCHBAR inter-channel register. */
static enum intel_dram_type
skl_get_dram_type(struct drm_i915_private *i915)
{
	u32 val;

	val = intel_uncore_read(&i915->uncore,
				SKL_MAD_INTER_CHANNEL_0_0_0_MCHBAR_MCMAIN);

	switch (val & SKL_DRAM_DDR_TYPE_MASK) {
	case SKL_DRAM_DDR_TYPE_DDR3:
		return INTEL_DRAM_DDR3;
	case SKL_DRAM_DDR_TYPE_DDR4:
		return INTEL_DRAM_DDR4;
	case SKL_DRAM_DDR_TYPE_LPDDR3:
		return INTEL_DRAM_LPDDR3;
	case SKL_DRAM_DDR_TYPE_LPDDR4:
		return INTEL_DRAM_LPDDR4;
	default:
		MISSING_CASE(val);
		return INTEL_DRAM_UNKNOWN;
	}
}

/*
 * Top-level DRAM detection for SKL/CNL class hardware; v5.9 also
 * computes and validates an aggregate bandwidth figure.
 */
static int
skl_get_dram_info(struct drm_i915_private *i915)
{
	struct dram_info *dram_info = &i915->dram_info;
	u32 mem_freq_khz, val;
	int ret;

	dram_info->type = skl_get_dram_type(i915);
	drm_dbg_kms(&i915->drm, "DRAM type: %s\n",
		    intel_dram_type_str(dram_info->type));

	ret = skl_dram_get_channels_info(i915);
	if (ret)
		return ret;

	val = intel_uncore_read(&i915->uncore,
				SKL_MC_BIOS_DATA_0_0_0_MCHBAR_PCU);
	mem_freq_khz = DIV_ROUND_UP((val & SKL_REQ_DATA_MASK) *
				    SKL_MEMORY_FREQ_MULTIPLIER_HZ, 1000);

	/* NOTE(review): *8 presumably converts channels*kHz to kBps — confirm. */
	dram_info->bandwidth_kbps = dram_info->num_channels *
		mem_freq_khz * 8;

	if (dram_info->bandwidth_kbps == 0) {
		drm_info(&i915->drm,
			 "Couldn't get system memory bandwidth\n");
		return -EINVAL;
	}

	dram_info->valid = true;
	return 0;
}
283
/* Returns Gb per DRAM device */
static int bxt_get_dimm_size(u32 val)
{
	switch (val & BXT_DRAM_SIZE_MASK) {
	case BXT_DRAM_SIZE_4GBIT:
		return 4;
	case BXT_DRAM_SIZE_6GBIT:
		return 6;
	case BXT_DRAM_SIZE_8GBIT:
		return 8;
	case BXT_DRAM_SIZE_12GBIT:
		return 12;
	case BXT_DRAM_SIZE_16GBIT:
		return 16;
	default:
		MISSING_CASE(val);
		return 0;
	}
}

/* Decode the DRAM device width from a BXT DUNIT register; 0 if empty. */
static int bxt_get_dimm_width(u32 val)
{
	if (!bxt_get_dimm_size(val))
		return 0;

	/* Field encodes log2(width / 8). */
	val = (val & BXT_DRAM_WIDTH_MASK) >> BXT_DRAM_WIDTH_SHIFT;

	return 8 << val;
}

/* Decode the rank count from a BXT DUNIT register; 0 if empty/unknown. */
static int bxt_get_dimm_ranks(u32 val)
{
	if (!bxt_get_dimm_size(val))
		return 0;

	switch (val & BXT_DRAM_RANK_MASK) {
	case BXT_DRAM_RANK_SINGLE:
		return 1;
	case BXT_DRAM_RANK_DUAL:
		return 2;
	default:
		MISSING_CASE(val);
		return 0;
	}
}

/* Decode the DRAM type from a BXT DUNIT register; UNKNOWN if empty. */
static enum intel_dram_type bxt_get_dimm_type(u32 val)
{
	if (!bxt_get_dimm_size(val))
		return INTEL_DRAM_UNKNOWN;

	switch (val & BXT_DRAM_TYPE_MASK) {
	case BXT_DRAM_TYPE_DDR3:
		return INTEL_DRAM_DDR3;
	case BXT_DRAM_TYPE_LPDDR3:
		return INTEL_DRAM_LPDDR3;
	case BXT_DRAM_TYPE_DDR4:
		return INTEL_DRAM_DDR4;
	case BXT_DRAM_TYPE_LPDDR4:
		return INTEL_DRAM_LPDDR4;
	default:
		MISSING_CASE(val);
		return INTEL_DRAM_UNKNOWN;
	}
}

/* Fill a dram_dimm_info from one BXT DUNIT register value. */
static void bxt_get_dimm_info(struct dram_dimm_info *dimm, u32 val)
{
	/* Width and ranks must be decoded first: the size math uses them. */
	dimm->width = bxt_get_dimm_width(val);
	dimm->ranks = bxt_get_dimm_ranks(val);

	/*
	 * Size in register is Gb per DRAM device. Convert to total
	 * GB to match the way we report this for non-LP platforms.
	 */
	dimm->size = bxt_get_dimm_size(val) * intel_dimm_num_devices(dimm) / 8;
}
361
/*
 * DRAM detection for BXT/GLK (v5.9): compute bandwidth from frequency and
 * active channel count, then walk the DUNIT registers for per-channel
 * DIMM configuration, type, and ranks.
 */
static int bxt_get_dram_info(struct drm_i915_private *i915)
{
	struct dram_info *dram_info = &i915->dram_info;
	u32 dram_channels;
	u32 mem_freq_khz, val;
	u8 num_active_channels;
	int i;

	val = intel_uncore_read(&i915->uncore, BXT_P_CR_MC_BIOS_REQ_0_0_0);
	mem_freq_khz = DIV_ROUND_UP((val & BXT_REQ_DATA_MASK) *
				    BXT_MEMORY_FREQ_MULTIPLIER_HZ, 1000);

	dram_channels = val & BXT_DRAM_CHANNEL_ACTIVE_MASK;
	num_active_channels = hweight32(dram_channels);

	/* Each active bit represents 4-byte channel */
	dram_info->bandwidth_kbps = (mem_freq_khz * num_active_channels * 4);

	if (dram_info->bandwidth_kbps == 0) {
		drm_info(&i915->drm,
			 "Couldn't get system memory bandwidth\n");
		return -EINVAL;
	}

	/*
	 * Now read each DUNIT8/9/10/11 to check the rank of each dimms.
	 */
	for (i = BXT_D_CR_DRP0_DUNIT_START; i <= BXT_D_CR_DRP0_DUNIT_END; i++) {
		struct dram_dimm_info dimm;
		enum intel_dram_type type;

		val = intel_uncore_read(&i915->uncore, BXT_D_CR_DRP0_DUNIT(i));
		/* All-ones indicates an absent/unpopulated DUNIT. */
		if (val == 0xFFFFFFFF)
			continue;

		dram_info->num_channels++;

		bxt_get_dimm_info(&dimm, val);
		type = bxt_get_dimm_type(val);

		/* All populated DUNITs are expected to report the same type. */
		drm_WARN_ON(&i915->drm, type != INTEL_DRAM_UNKNOWN &&
			    dram_info->type != INTEL_DRAM_UNKNOWN &&
			    dram_info->type != type);

		drm_dbg_kms(&i915->drm,
			    "CH%u DIMM size: %u GB, width: X%u, ranks: %u, type: %s\n",
			    i - BXT_D_CR_DRP0_DUNIT_START,
			    dimm.size, dimm.width, dimm.ranks,
			    intel_dram_type_str(type));

		/*
		 * If any of the channel is single rank channel,
		 * worst case output will be same as if single rank
		 * memory, so consider single rank memory.
		 */
		if (dram_info->ranks == 0)
			dram_info->ranks = dimm.ranks;
		else if (dimm.ranks == 1)
			dram_info->ranks = 1;

		if (type != INTEL_DRAM_UNKNOWN)
			dram_info->type = type;
	}

	if (dram_info->type == INTEL_DRAM_UNKNOWN || dram_info->ranks == 0) {
		drm_info(&i915->drm, "couldn't get memory information\n");
		return -EINVAL;
	}

	dram_info->valid = true;

	return 0;
}
435
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
/*
 * Detect the system DRAM configuration (v5.9). Detection failure is
 * non-fatal: the conservative default set at the top remains in effect.
 */
void intel_dram_detect(struct drm_i915_private *i915)
{
	struct dram_info *dram_info = &i915->dram_info;
	int ret;

	/*
	 * Assume 16Gb DIMMs are present until proven otherwise.
	 * This is only used for the level 0 watermark latency
	 * w/a which does not apply to bxt/glk.
	 */
	dram_info->is_16gb_dimm = !IS_GEN9_LP(i915);

	if (INTEL_GEN(i915) < 9 || !HAS_DISPLAY(i915))
		return;

	/* Dispatch: BXT/GLK use DUNIT registers, everything else MCHBAR. */
	if (IS_GEN9_LP(i915))
		ret = bxt_get_dram_info(i915);
	else
		ret = skl_get_dram_info(i915);
	if (ret)
		return;

	drm_dbg_kms(&i915->drm, "DRAM bandwidth: %u kBps, channels: %u\n",
		    dram_info->bandwidth_kbps, dram_info->num_channels);

	drm_dbg_kms(&i915->drm, "DRAM ranks: %u, 16Gb DIMMs: %s\n",
		    dram_info->ranks, yesno(dram_info->is_16gb_dimm));
}
464
/*
 * Compute eDRAM size in MB from the capability register fields
 * (banks * ways * sets). The i915 argument is unused here.
 */
static u32 gen9_edram_size_mb(struct drm_i915_private *i915, u32 cap)
{
	static const u8 ways[8] = { 4, 8, 12, 16, 16, 16, 16, 16 };
	static const u8 sets[4] = { 1, 1, 2, 2 };

	return EDRAM_NUM_BANKS(cap) *
		ways[EDRAM_WAYS_IDX(cap)] *
		sets[EDRAM_SETS_IDX(cap)];
}
474
/* Detect eDRAM presence and size, storing the result in i915->edram_size_mb. */
void intel_dram_edram_detect(struct drm_i915_private *i915)
{
	u32 edram_cap = 0;

	/* Only HSW/BDW and gen9+ have eDRAM capability reporting. */
	if (!(IS_HASWELL(i915) || IS_BROADWELL(i915) || INTEL_GEN(i915) >= 9))
		return;

	edram_cap = __raw_uncore_read32(&i915->uncore, HSW_EDRAM_CAP);

	/* NB: We can't write IDICR yet because we don't have gt funcs set up */

	if (!(edram_cap & EDRAM_ENABLED))
		return;

	/*
	 * The needed capability bits for size calculation are not there with
	 * pre gen9 so return 128MB always.
	 */
	if (INTEL_GEN(i915) < 9)
		i915->edram_size_mb = 128;
	else
		i915->edram_size_mb = gen9_edram_size_mb(i915, edram_cap);

	drm_info(&i915->drm, "Found %uMB of eDRAM\n", i915->edram_size_mb);
}