v6.8
  1// SPDX-License-Identifier: GPL-2.0-or-later
  2/*
  3 * k10temp.c - AMD Family 10h/11h/12h/14h/15h/16h/17h
  4 *		processor hardware monitoring
  5 *
  6 * Copyright (c) 2009 Clemens Ladisch <clemens@ladisch.de>
  7 * Copyright (c) 2020 Guenter Roeck <linux@roeck-us.net>
  8 *
  9 * Implementation notes:
 10 * - CCD register address information as well as the calculation to
 11 *   convert raw register values is from https://github.com/ocerman/zenpower.
 12 *   The information is not confirmed from chip datasheets, but experiments
 13 *   suggest that it provides reasonable temperature values.
 14 */
 15
 16#include <linux/bitops.h>
 17#include <linux/err.h>
 18#include <linux/hwmon.h>
 19#include <linux/init.h>
 20#include <linux/module.h>
 21#include <linux/pci.h>
 22#include <linux/pci_ids.h>
 23#include <asm/amd_nb.h>
 24#include <asm/processor.h>
 25
 26MODULE_DESCRIPTION("AMD Family 10h+ CPU core temperature monitor");
 27MODULE_AUTHOR("Clemens Ladisch <clemens@ladisch.de>");
 28MODULE_LICENSE("GPL");
 29
 30static bool force;
 31module_param(force, bool, 0444);
 32MODULE_PARM_DESC(force, "force loading on processors with erratum 319");
 33
 34/* Provide lock for writing to NB_SMU_IND_ADDR */
 35static DEFINE_MUTEX(nb_smu_ind_mutex);
 36
 37#ifndef PCI_DEVICE_ID_AMD_15H_M70H_NB_F3
 38#define PCI_DEVICE_ID_AMD_15H_M70H_NB_F3	0x15b3
 39#endif
 40
 41/* CPUID function 0x80000001, ebx */
 42#define CPUID_PKGTYPE_MASK	GENMASK(31, 28)
 43#define CPUID_PKGTYPE_F		0x00000000
 44#define CPUID_PKGTYPE_AM2R2_AM3	0x10000000
 45
 46/* DRAM controller (PCI function 2) */
 47#define REG_DCT0_CONFIG_HIGH		0x094
 48#define  DDR3_MODE			BIT(8)
 49
 50/* miscellaneous (PCI function 3) */
 51#define REG_HARDWARE_THERMAL_CONTROL	0x64
 52#define  HTC_ENABLE			BIT(0)
 53
 54#define REG_REPORTED_TEMPERATURE	0xa4
 55
 56#define REG_NORTHBRIDGE_CAPABILITIES	0xe8
 57#define  NB_CAP_HTC			BIT(10)
 58
 59/*
 60 * For F15h M60h and M70h, REG_HARDWARE_THERMAL_CONTROL
 61 * and REG_REPORTED_TEMPERATURE have been moved to
 62 * D0F0xBC_xD820_0C64 [Hardware Temperature Control]
 63 * D0F0xBC_xD820_0CA4 [Reported Temperature Control]
 64 */
 65#define F15H_M60H_HARDWARE_TEMP_CTRL_OFFSET	0xd8200c64
 66#define F15H_M60H_REPORTED_TEMP_CTRL_OFFSET	0xd8200ca4
 67
 68/* Common for the Zen CPU families (17h, 18h, 19h and 1Ah) */
 69#define ZEN_REPORTED_TEMP_CTRL_BASE		0x00059800
 70
 71#define ZEN_CCD_TEMP(offset, x)			(ZEN_REPORTED_TEMP_CTRL_BASE + \
 72						 (offset) + ((x) * 4))
 73#define ZEN_CCD_TEMP_VALID			BIT(11)
 74#define ZEN_CCD_TEMP_MASK			GENMASK(10, 0)
 75
 76#define ZEN_CUR_TEMP_SHIFT			21
 77#define ZEN_CUR_TEMP_RANGE_SEL_MASK		BIT(19)
 78#define ZEN_CUR_TEMP_TJ_SEL_MASK		GENMASK(17, 16)
 79
 80/*
 81 * AMD's Industrial processor 3255 supports temperature from -40 deg to 105 deg Celsius.
 82 * Use the model name to identify 3255 CPUs and set a flag to display negative temperature.
 83 * Do not clamp negative Tctl or Tdie values to zero if the flag is set.
 84 */
 85#define AMD_I3255_STR				"3255"
 86
 87struct k10temp_data {
 88	struct pci_dev *pdev;
 89	void (*read_htcreg)(struct pci_dev *pdev, u32 *regval);
 90	void (*read_tempreg)(struct pci_dev *pdev, u32 *regval);
 91	int temp_offset;
 92	u32 temp_adjust_mask;
 93	u32 show_temp;
 94	bool is_zen;
 95	u32 ccd_offset;
 96	bool disp_negative;
 97};
 98
 99#define TCTL_BIT	0
100#define TDIE_BIT	1
101#define TCCD_BIT(x)	((x) + 2)
102
103#define HAVE_TEMP(d, channel)	((d)->show_temp & BIT(channel))
104#define HAVE_TDIE(d)		HAVE_TEMP(d, TDIE_BIT)
105
106struct tctl_offset {
107	u8 model;
108	char const *id;
109	int offset;
110};
111
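/*
 * Some Ryzen parts report Tctl with a fixed offset above the die
 * temperature.  For the models listed below the driver exposes an extra
 * Tdie channel, computed as Tctl minus the per-model offset given here in
 * millidegrees Celsius (see the TDIE_BIT handling in k10temp_probe()).
 */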
112static const struct tctl_offset tctl_offset_table[] = {
113	{ 0x17, "AMD Ryzen 5 1600X", 20000 },
114	{ 0x17, "AMD Ryzen 7 1700X", 20000 },
115	{ 0x17, "AMD Ryzen 7 1800X", 20000 },
116	{ 0x17, "AMD Ryzen 7 2700X", 10000 },
117	{ 0x17, "AMD Ryzen Threadripper 19", 27000 }, /* 19{00,20,50}X */
118	{ 0x17, "AMD Ryzen Threadripper 29", 27000 }, /* 29{20,50,70,90}[W]X */
119};
120
121static void read_htcreg_pci(struct pci_dev *pdev, u32 *regval)
122{
123	pci_read_config_dword(pdev, REG_HARDWARE_THERMAL_CONTROL, regval);
124}
125
126static void read_tempreg_pci(struct pci_dev *pdev, u32 *regval)
127{
128	pci_read_config_dword(pdev, REG_REPORTED_TEMPERATURE, regval);
129}
130
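/*
 * Indirect northbridge register access used on Fam15h M60h/M70h: the SMU
 * register offset is written to the index register at 'base' on device 0
 * function 0, and the value is then read back from the data register at
 * 'base' + 4.  nb_smu_ind_mutex serializes the index/data sequence.
 */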
131static void amd_nb_index_read(struct pci_dev *pdev, unsigned int devfn,
132			      unsigned int base, int offset, u32 *val)
133{
134	mutex_lock(&nb_smu_ind_mutex);
135	pci_bus_write_config_dword(pdev->bus, devfn,
136				   base, offset);
137	pci_bus_read_config_dword(pdev->bus, devfn,
138				  base + 4, val);
139	mutex_unlock(&nb_smu_ind_mutex);
140}
141
142static void read_htcreg_nb_f15(struct pci_dev *pdev, u32 *regval)
143{
144	amd_nb_index_read(pdev, PCI_DEVFN(0, 0), 0xb8,
145			  F15H_M60H_HARDWARE_TEMP_CTRL_OFFSET, regval);
146}
147
148static void read_tempreg_nb_f15(struct pci_dev *pdev, u32 *regval)
149{
150	amd_nb_index_read(pdev, PCI_DEVFN(0, 0), 0xb8,
151			  F15H_M60H_REPORTED_TEMP_CTRL_OFFSET, regval);
152}
153
154static void read_tempreg_nb_zen(struct pci_dev *pdev, u32 *regval)
155{
156	amd_smn_read(amd_pci_dev_to_node_id(pdev),
157		     ZEN_REPORTED_TEMP_CTRL_BASE, regval);
158}
159
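/*
 * Convert the raw reported-temperature value to millidegrees Celsius:
 * the field in bits 31:21 has an LSB of 0.125 degC, so it is multiplied
 * by 125, and 49 degC is subtracted when the range-select or TjSel bits
 * indicate the offset range.  For example, regval >> 21 == 470 gives
 * 470 * 125 = 58750 (58.750 degC), or 58750 - 49000 = 9750 with the
 * range-select bit set.
 */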
160static long get_raw_temp(struct k10temp_data *data)
161{
162	u32 regval;
163	long temp;
164
165	data->read_tempreg(data->pdev, &regval);
166	temp = (regval >> ZEN_CUR_TEMP_SHIFT) * 125;
167	if ((regval & data->temp_adjust_mask) ||
168	    (regval & ZEN_CUR_TEMP_TJ_SEL_MASK) == ZEN_CUR_TEMP_TJ_SEL_MASK)
169		temp -= 49000;
170	return temp;
171}
172
173static const char *k10temp_temp_label[] = {
174	"Tctl",
175	"Tdie",
176	"Tccd1",
177	"Tccd2",
178	"Tccd3",
179	"Tccd4",
180	"Tccd5",
181	"Tccd6",
182	"Tccd7",
183	"Tccd8",
184	"Tccd9",
185	"Tccd10",
186	"Tccd11",
187	"Tccd12",
188};
189
190static int k10temp_read_labels(struct device *dev,
191			       enum hwmon_sensor_types type,
192			       u32 attr, int channel, const char **str)
193{
194	switch (type) {
195	case hwmon_temp:
196		*str = k10temp_temp_label[channel];
197		break;
198	default:
199		return -EOPNOTSUPP;
200	}
201	return 0;
202}
203
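/*
 * Channel mapping: channel 0 is Tctl, channel 1 is Tdie (Tctl minus the
 * per-model offset) and channels 2..13 are Tccd1..Tccd12.  The critical
 * limit comes from the HTC register: bits 22:16 in 0.5 degC steps with a
 * 52 degC offset, e.g. a field value of 95 yields 95 * 500 + 52000 =
 * 99500 (99.5 degC); the hysteresis field in bits 27:24 is subtracted in
 * the same 0.5 degC units.
 */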
204static int k10temp_read_temp(struct device *dev, u32 attr, int channel,
205			     long *val)
206{
207	struct k10temp_data *data = dev_get_drvdata(dev);
208	u32 regval;
209
210	switch (attr) {
211	case hwmon_temp_input:
212		switch (channel) {
213		case 0:		/* Tctl */
214			*val = get_raw_temp(data);
215			if (*val < 0 && !data->disp_negative)
216				*val = 0;
217			break;
218		case 1:		/* Tdie */
219			*val = get_raw_temp(data) - data->temp_offset;
220			if (*val < 0 && !data->disp_negative)
221				*val = 0;
222			break;
223		case 2 ... 13:		/* Tccd{1-12} */
224			amd_smn_read(amd_pci_dev_to_node_id(data->pdev),
225				     ZEN_CCD_TEMP(data->ccd_offset, channel - 2),
226						  &regval);
227			*val = (regval & ZEN_CCD_TEMP_MASK) * 125 - 49000;
228			break;
229		default:
230			return -EOPNOTSUPP;
231		}
232		break;
233	case hwmon_temp_max:
234		*val = 70 * 1000;
235		break;
236	case hwmon_temp_crit:
237		data->read_htcreg(data->pdev, &regval);
238		*val = ((regval >> 16) & 0x7f) * 500 + 52000;
239		break;
240	case hwmon_temp_crit_hyst:
241		data->read_htcreg(data->pdev, &regval);
242		*val = (((regval >> 16) & 0x7f)
243			- ((regval >> 24) & 0xf)) * 500 + 52000;
244		break;
245	default:
246		return -EOPNOTSUPP;
247	}
248	return 0;
249}
250
251static int k10temp_read(struct device *dev, enum hwmon_sensor_types type,
252			u32 attr, int channel, long *val)
253{
254	switch (type) {
255	case hwmon_temp:
256		return k10temp_read_temp(dev, attr, channel, val);
257	default:
258		return -EOPNOTSUPP;
259	}
260}
261
262static umode_t k10temp_is_visible(const void *_data,
263				  enum hwmon_sensor_types type,
264				  u32 attr, int channel)
265{
266	const struct k10temp_data *data = _data;
267	struct pci_dev *pdev = data->pdev;
268	u32 reg;
269
270	switch (type) {
271	case hwmon_temp:
272		switch (attr) {
273		case hwmon_temp_input:
274			if (!HAVE_TEMP(data, channel))
275				return 0;
276			break;
277		case hwmon_temp_max:
278			if (channel || data->is_zen)
279				return 0;
280			break;
281		case hwmon_temp_crit:
282		case hwmon_temp_crit_hyst:
283			if (channel || !data->read_htcreg)
284				return 0;
285
286			pci_read_config_dword(pdev,
287					      REG_NORTHBRIDGE_CAPABILITIES,
288					      &reg);
289			if (!(reg & NB_CAP_HTC))
290				return 0;
291
292			data->read_htcreg(data->pdev, &reg);
293			if (!(reg & HTC_ENABLE))
294				return 0;
295			break;
296		case hwmon_temp_label:
297			/* Show temperature labels only on Zen CPUs */
298			if (!data->is_zen || !HAVE_TEMP(data, channel))
299				return 0;
300			break;
301		default:
302			return 0;
303		}
304		break;
305	default:
306		return 0;
307	}
308	return 0444;
309}
310
310
311static bool has_erratum_319(struct pci_dev *pdev)
312{
313	u32 pkg_type, reg_dram_cfg;
314
315	if (boot_cpu_data.x86 != 0x10)
316		return false;
317
318	/*
319	 * Erratum 319: The thermal sensor of Socket F/AM2+ processors
320	 *              may be unreliable.
321	 */
322	pkg_type = cpuid_ebx(0x80000001) & CPUID_PKGTYPE_MASK;
323	if (pkg_type == CPUID_PKGTYPE_F)
324		return true;
325	if (pkg_type != CPUID_PKGTYPE_AM2R2_AM3)
326		return false;
327
328	/* DDR3 memory implies socket AM3, which is good */
329	pci_bus_read_config_dword(pdev->bus,
330				  PCI_DEVFN(PCI_SLOT(pdev->devfn), 2),
331				  REG_DCT0_CONFIG_HIGH, &reg_dram_cfg);
332	if (reg_dram_cfg & DDR3_MODE)
333		return false;
334
335	/*
336	 * Unfortunately it is possible to run a socket AM3 CPU with DDR2
337	 * memory. We blacklist all the cores which do exist in socket AM2+
338	 * format. It still isn't perfect, as RB-C2 cores exist in both AM2+
339	 * and AM3 formats, but that's the best we can do.
340	 */
341	return boot_cpu_data.x86_model < 4 ||
342	       (boot_cpu_data.x86_model == 4 && boot_cpu_data.x86_stepping <= 2);
343}
344
345static const struct hwmon_channel_info * const k10temp_info[] = {
346	HWMON_CHANNEL_INFO(temp,
347			   HWMON_T_INPUT | HWMON_T_MAX |
348			   HWMON_T_CRIT | HWMON_T_CRIT_HYST |
349			   HWMON_T_LABEL,
350			   HWMON_T_INPUT | HWMON_T_LABEL,
351			   HWMON_T_INPUT | HWMON_T_LABEL,
352			   HWMON_T_INPUT | HWMON_T_LABEL,
353			   HWMON_T_INPUT | HWMON_T_LABEL,
354			   HWMON_T_INPUT | HWMON_T_LABEL,
355			   HWMON_T_INPUT | HWMON_T_LABEL,
356			   HWMON_T_INPUT | HWMON_T_LABEL,
357			   HWMON_T_INPUT | HWMON_T_LABEL,
358			   HWMON_T_INPUT | HWMON_T_LABEL,
359			   HWMON_T_INPUT | HWMON_T_LABEL,
360			   HWMON_T_INPUT | HWMON_T_LABEL,
361			   HWMON_T_INPUT | HWMON_T_LABEL,
362			   HWMON_T_INPUT | HWMON_T_LABEL),
363	NULL
364};
365
366static const struct hwmon_ops k10temp_hwmon_ops = {
367	.is_visible = k10temp_is_visible,
368	.read = k10temp_read,
369	.read_string = k10temp_read_labels,
370};
371
372static const struct hwmon_chip_info k10temp_chip_info = {
373	.ops = &k10temp_hwmon_ops,
374	.info = k10temp_info,
375};
376
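/*
 * Each CCD exposes its own temperature register at
 * ZEN_REPORTED_TEMP_CTRL_BASE + ccd_offset + 4 * ccd.  Bit 11 marks the
 * reading as valid and bits 10:0 hold the temperature in 0.125 degC steps
 * with the usual 49 degC offset (see k10temp_read_temp()).  Probing simply
 * records which of the first 'limit' CCD registers report valid data.
 */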
377static void k10temp_get_ccd_support(struct pci_dev *pdev,
378				    struct k10temp_data *data, int limit)
379{
380	u32 regval;
381	int i;
382
383	for (i = 0; i < limit; i++) {
384		amd_smn_read(amd_pci_dev_to_node_id(pdev),
385			     ZEN_CCD_TEMP(data->ccd_offset, i), &regval);
386		if (regval & ZEN_CCD_TEMP_VALID)
387			data->show_temp |= BIT(TCCD_BIT(i));
388	}
389}
390
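/*
 * Probe: select the register access method from the CPU family/model.
 * Pre-Zen parts use plain PCI config reads, Fam15h M60h/M70h use the
 * indirect NB index/data pair, and Zen parts (Fam17h and later) read the
 * SMN reported-temperature register.  Zen models with a known CCD register
 * layout also probe for per-CCD sensors here.
 */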
391static int k10temp_probe(struct pci_dev *pdev, const struct pci_device_id *id)
392{
393	int unreliable = has_erratum_319(pdev);
394	struct device *dev = &pdev->dev;
395	struct k10temp_data *data;
396	struct device *hwmon_dev;
397	int i;
398
399	if (unreliable) {
400		if (!force) {
401			dev_err(dev,
402				"unreliable CPU thermal sensor; monitoring disabled\n");
403			return -ENODEV;
404		}
405		dev_warn(dev,
406			 "unreliable CPU thermal sensor; check erratum 319\n");
407	}
408
409	data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
410	if (!data)
411		return -ENOMEM;
412
413	data->pdev = pdev;
414	data->show_temp |= BIT(TCTL_BIT);	/* Always show Tctl */
415
416	if (boot_cpu_data.x86 == 0x17 &&
417	    strstr(boot_cpu_data.x86_model_id, AMD_I3255_STR)) {
418		data->disp_negative = true;
419	}
420
421	if (boot_cpu_data.x86 == 0x15 &&
422	    ((boot_cpu_data.x86_model & 0xf0) == 0x60 ||
423	     (boot_cpu_data.x86_model & 0xf0) == 0x70)) {
424		data->read_htcreg = read_htcreg_nb_f15;
425		data->read_tempreg = read_tempreg_nb_f15;
426	} else if (boot_cpu_data.x86 == 0x17 || boot_cpu_data.x86 == 0x18) {
427		data->temp_adjust_mask = ZEN_CUR_TEMP_RANGE_SEL_MASK;
428		data->read_tempreg = read_tempreg_nb_zen;
429		data->is_zen = true;
430
431		switch (boot_cpu_data.x86_model) {
432		case 0x1:	/* Zen */
433		case 0x8:	/* Zen+ */
434		case 0x11:	/* Zen APU */
435		case 0x18:	/* Zen+ APU */
436			data->ccd_offset = 0x154;
437			k10temp_get_ccd_support(pdev, data, 4);
438			break;
439		case 0x31:	/* Zen2 Threadripper */
440		case 0x60:	/* Renoir */
441		case 0x68:	/* Lucienne */
442		case 0x71:	/* Zen2 */
443			data->ccd_offset = 0x154;
444			k10temp_get_ccd_support(pdev, data, 8);
445			break;
446		case 0xa0 ... 0xaf:
447			data->ccd_offset = 0x300;
448			k10temp_get_ccd_support(pdev, data, 8);
449			break;
450		}
451	} else if (boot_cpu_data.x86 == 0x19) {
452		data->temp_adjust_mask = ZEN_CUR_TEMP_RANGE_SEL_MASK;
453		data->read_tempreg = read_tempreg_nb_zen;
454		data->is_zen = true;
455
456		switch (boot_cpu_data.x86_model) {
457		case 0x0 ... 0x1:	/* Zen3 SP3/TR */
458		case 0x8:		/* Zen3 TR Chagall */
459		case 0x21:		/* Zen3 Ryzen Desktop */
460		case 0x50 ... 0x5f:	/* Green Sardine */
461			data->ccd_offset = 0x154;
462			k10temp_get_ccd_support(pdev, data, 8);
463			break;
464		case 0x40 ... 0x4f:	/* Yellow Carp */
465			data->ccd_offset = 0x300;
466			k10temp_get_ccd_support(pdev, data, 8);
467			break;
468		case 0x60 ... 0x6f:
469		case 0x70 ... 0x7f:
470			data->ccd_offset = 0x308;
471			k10temp_get_ccd_support(pdev, data, 8);
472			break;
473		case 0x10 ... 0x1f:
474		case 0xa0 ... 0xaf:
475			data->ccd_offset = 0x300;
476			k10temp_get_ccd_support(pdev, data, 12);
477			break;
478		}
479	} else if (boot_cpu_data.x86 == 0x1a) {
480		data->temp_adjust_mask = ZEN_CUR_TEMP_RANGE_SEL_MASK;
481		data->read_tempreg = read_tempreg_nb_zen;
482		data->is_zen = true;
483	} else {
484		data->read_htcreg = read_htcreg_pci;
485		data->read_tempreg = read_tempreg_pci;
486	}
487
488	for (i = 0; i < ARRAY_SIZE(tctl_offset_table); i++) {
489		const struct tctl_offset *entry = &tctl_offset_table[i];
490
491		if (boot_cpu_data.x86 == entry->model &&
492		    strstr(boot_cpu_data.x86_model_id, entry->id)) {
493			data->show_temp |= BIT(TDIE_BIT);	/* show Tdie */
494			data->temp_offset = entry->offset;
495			break;
496		}
497	}
498
499	hwmon_dev = devm_hwmon_device_register_with_info(dev, "k10temp", data,
500							 &k10temp_chip_info,
501							 NULL);
502	return PTR_ERR_OR_ZERO(hwmon_dev);
503}
504
505static const struct pci_device_id k10temp_id_table[] = {
506	{ PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_10H_NB_MISC) },
507	{ PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_11H_NB_MISC) },
508	{ PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_CNB17H_F3) },
509	{ PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_15H_NB_F3) },
510	{ PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_15H_M10H_F3) },
511	{ PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_15H_M30H_NB_F3) },
512	{ PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_15H_M60H_NB_F3) },
513	{ PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_15H_M70H_NB_F3) },
514	{ PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_16H_NB_F3) },
515	{ PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_16H_M30H_NB_F3) },
516	{ PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_17H_DF_F3) },
517	{ PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_17H_M10H_DF_F3) },
518	{ PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_17H_M30H_DF_F3) },
519	{ PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_17H_M60H_DF_F3) },
520	{ PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_17H_M70H_DF_F3) },
521	{ PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_17H_MA0H_DF_F3) },
522	{ PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_19H_DF_F3) },
523	{ PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_19H_M10H_DF_F3) },
524	{ PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_19H_M40H_DF_F3) },
525	{ PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_19H_M50H_DF_F3) },
526	{ PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_19H_M60H_DF_F3) },
527	{ PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_19H_M70H_DF_F3) },
528	{ PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_19H_M78H_DF_F3) },
529	{ PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_1AH_M00H_DF_F3) },
530	{ PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_1AH_M20H_DF_F3) },
531	{ PCI_VDEVICE(HYGON, PCI_DEVICE_ID_AMD_17H_DF_F3) },
532	{}
533};
534MODULE_DEVICE_TABLE(pci, k10temp_id_table);
535
536static struct pci_driver k10temp_driver = {
537	.name = "k10temp",
538	.id_table = k10temp_id_table,
539	.probe = k10temp_probe,
540};
541
542module_pci_driver(k10temp_driver);
v5.4
  1// SPDX-License-Identifier: GPL-2.0-or-later
  2/*
  3 * k10temp.c - AMD Family 10h/11h/12h/14h/15h/16h processor hardware monitoring
  4 *
  5 * Copyright (c) 2009 Clemens Ladisch <clemens@ladisch.de>
  6 */
  7
  8#include <linux/err.h>
  9#include <linux/hwmon.h>
 10#include <linux/hwmon-sysfs.h>
 11#include <linux/init.h>
 12#include <linux/module.h>
 13#include <linux/pci.h>
 14#include <linux/pci_ids.h>
 15#include <asm/amd_nb.h>
 16#include <asm/processor.h>
 17
 18MODULE_DESCRIPTION("AMD Family 10h+ CPU core temperature monitor");
 19MODULE_AUTHOR("Clemens Ladisch <clemens@ladisch.de>");
 20MODULE_LICENSE("GPL");
 21
 22static bool force;
 23module_param(force, bool, 0444);
 24MODULE_PARM_DESC(force, "force loading on processors with erratum 319");
 25
 26/* Provide lock for writing to NB_SMU_IND_ADDR */
 27static DEFINE_MUTEX(nb_smu_ind_mutex);
 28
 29#ifndef PCI_DEVICE_ID_AMD_15H_M70H_NB_F3
 30#define PCI_DEVICE_ID_AMD_15H_M70H_NB_F3	0x15b3
 31#endif
 32
 33/* CPUID function 0x80000001, ebx */
 34#define CPUID_PKGTYPE_MASK	0xf0000000
 35#define CPUID_PKGTYPE_F		0x00000000
 36#define CPUID_PKGTYPE_AM2R2_AM3	0x10000000
 37
 38/* DRAM controller (PCI function 2) */
 39#define REG_DCT0_CONFIG_HIGH		0x094
 40#define  DDR3_MODE			0x00000100
 41
 42/* miscellaneous (PCI function 3) */
 43#define REG_HARDWARE_THERMAL_CONTROL	0x64
 44#define  HTC_ENABLE			0x00000001
 45
 46#define REG_REPORTED_TEMPERATURE	0xa4
 47
 48#define REG_NORTHBRIDGE_CAPABILITIES	0xe8
 49#define  NB_CAP_HTC			0x00000400
 50
 51/*
 52 * For F15h M60h and M70h, REG_HARDWARE_THERMAL_CONTROL
 53 * and REG_REPORTED_TEMPERATURE have been moved to
 54 * D0F0xBC_xD820_0C64 [Hardware Temperature Control]
 55 * D0F0xBC_xD820_0CA4 [Reported Temperature Control]
 56 */
 57#define F15H_M60H_HARDWARE_TEMP_CTRL_OFFSET	0xd8200c64
 58#define F15H_M60H_REPORTED_TEMP_CTRL_OFFSET	0xd8200ca4
 59
 60/* F17h M01h Access through SMN */
 61#define F17H_M01H_REPORTED_TEMP_CTRL_OFFSET	0x00059800
 62
 63struct k10temp_data {
 64	struct pci_dev *pdev;
 65	void (*read_htcreg)(struct pci_dev *pdev, u32 *regval);
 66	void (*read_tempreg)(struct pci_dev *pdev, u32 *regval);
 67	int temp_offset;
 68	u32 temp_adjust_mask;
 69	bool show_tdie;
 70};
 71
 72struct tctl_offset {
 73	u8 model;
 74	char const *id;
 75	int offset;
 76};
 77
 78static const struct tctl_offset tctl_offset_table[] = {
 79	{ 0x17, "AMD Ryzen 5 1600X", 20000 },
 80	{ 0x17, "AMD Ryzen 7 1700X", 20000 },
 81	{ 0x17, "AMD Ryzen 7 1800X", 20000 },
 82	{ 0x17, "AMD Ryzen 7 2700X", 10000 },
 83	{ 0x17, "AMD Ryzen Threadripper 19", 27000 }, /* 19{00,20,50}X */
 84	{ 0x17, "AMD Ryzen Threadripper 29", 27000 }, /* 29{20,50,70,90}[W]X */
 85};
 86
 87static void read_htcreg_pci(struct pci_dev *pdev, u32 *regval)
 88{
 89	pci_read_config_dword(pdev, REG_HARDWARE_THERMAL_CONTROL, regval);
 90}
 91
 92static void read_tempreg_pci(struct pci_dev *pdev, u32 *regval)
 93{
 94	pci_read_config_dword(pdev, REG_REPORTED_TEMPERATURE, regval);
 95}
 96
 97static void amd_nb_index_read(struct pci_dev *pdev, unsigned int devfn,
 98			      unsigned int base, int offset, u32 *val)
 99{
100	mutex_lock(&nb_smu_ind_mutex);
101	pci_bus_write_config_dword(pdev->bus, devfn,
102				   base, offset);
103	pci_bus_read_config_dword(pdev->bus, devfn,
104				  base + 4, val);
105	mutex_unlock(&nb_smu_ind_mutex);
106}
107
108static void read_htcreg_nb_f15(struct pci_dev *pdev, u32 *regval)
109{
110	amd_nb_index_read(pdev, PCI_DEVFN(0, 0), 0xb8,
111			  F15H_M60H_HARDWARE_TEMP_CTRL_OFFSET, regval);
112}
113
114static void read_tempreg_nb_f15(struct pci_dev *pdev, u32 *regval)
115{
116	amd_nb_index_read(pdev, PCI_DEVFN(0, 0), 0xb8,
117			  F15H_M60H_REPORTED_TEMP_CTRL_OFFSET, regval);
118}
119
120static void read_tempreg_nb_f17(struct pci_dev *pdev, u32 *regval)
121{
122	amd_smn_read(amd_pci_dev_to_node_id(pdev),
123		     F17H_M01H_REPORTED_TEMP_CTRL_OFFSET, regval);
124}
125
126static unsigned int get_raw_temp(struct k10temp_data *data)
127{
128	unsigned int temp;
129	u32 regval;
130
131	data->read_tempreg(data->pdev, &regval);
132	temp = (regval >> 21) * 125;
133	if (regval & data->temp_adjust_mask)
134		temp -= 49000;
135	return temp;
136}
137
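/*
 * In this version temp1 reports the Tctl value minus any per-model offset
 * and is labelled Tdie, while temp2 reports the raw Tctl value; temp2 and
 * both labels are only exposed when show_tdie is set (family 17h/18h), see
 * k10temp_is_visible() below.
 */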
138static ssize_t temp1_input_show(struct device *dev,
139				struct device_attribute *attr, char *buf)
140{
141	struct k10temp_data *data = dev_get_drvdata(dev);
142	unsigned int temp = get_raw_temp(data);
143
144	if (temp > data->temp_offset)
145		temp -= data->temp_offset;
146	else
147		temp = 0;
148
149	return sprintf(buf, "%u\n", temp);
150}
151
152static ssize_t temp2_input_show(struct device *dev,
153				struct device_attribute *devattr, char *buf)
154{
155	struct k10temp_data *data = dev_get_drvdata(dev);
156	unsigned int temp = get_raw_temp(data);
157
158	return sprintf(buf, "%u\n", temp);
159}
160
161static ssize_t temp_label_show(struct device *dev,
162			       struct device_attribute *devattr, char *buf)
163{
164	struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
165
166	return sprintf(buf, "%s\n", attr->index ? "Tctl" : "Tdie");
167}
168
169static ssize_t temp1_max_show(struct device *dev,
170			      struct device_attribute *attr, char *buf)
171{
172	return sprintf(buf, "%d\n", 70 * 1000);
173}
174
175static ssize_t temp_crit_show(struct device *dev,
176			      struct device_attribute *devattr, char *buf)
177{
178	struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
179	struct k10temp_data *data = dev_get_drvdata(dev);
180	int show_hyst = attr->index;
181	u32 regval;
182	int value;
183
184	data->read_htcreg(data->pdev, &regval);
185	value = ((regval >> 16) & 0x7f) * 500 + 52000;
186	if (show_hyst)
187		value -= ((regval >> 24) & 0xf) * 500;
188	return sprintf(buf, "%d\n", value);
189}
190
191static DEVICE_ATTR_RO(temp1_input);
192static DEVICE_ATTR_RO(temp1_max);
193static SENSOR_DEVICE_ATTR_RO(temp1_crit, temp_crit, 0);
194static SENSOR_DEVICE_ATTR_RO(temp1_crit_hyst, temp_crit, 1);
195
196static SENSOR_DEVICE_ATTR_RO(temp1_label, temp_label, 0);
197static DEVICE_ATTR_RO(temp2_input);
198static SENSOR_DEVICE_ATTR_RO(temp2_label, temp_label, 1);
199
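/*
 * The 'index' argument is the position of the attribute in k10temp_attrs[]
 * below: 0/1 (temp1_input, temp1_max) are always visible, 2/3 (temp1_crit,
 * temp1_crit_hyst) require a readable and enabled HTC register, and 4..6
 * (temp1_label, temp2_input, temp2_label) are only shown when show_tdie is
 * set.
 */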
200static umode_t k10temp_is_visible(struct kobject *kobj,
201				  struct attribute *attr, int index)
202{
203	struct device *dev = container_of(kobj, struct device, kobj);
204	struct k10temp_data *data = dev_get_drvdata(dev);
205	struct pci_dev *pdev = data->pdev;
206	u32 reg;
207
208	switch (index) {
209	case 0 ... 1:	/* temp1_input, temp1_max */
210	default:
211		break;
212	case 2 ... 3:	/* temp1_crit, temp1_crit_hyst */
213		if (!data->read_htcreg)
214			return 0;
215
216		pci_read_config_dword(pdev, REG_NORTHBRIDGE_CAPABILITIES,
217				      &reg);
218		if (!(reg & NB_CAP_HTC))
219			return 0;
220
221		data->read_htcreg(data->pdev, &reg);
222		if (!(reg & HTC_ENABLE))
223			return 0;
224		break;
225	case 4 ... 6:	/* temp1_label, temp2_input, temp2_label */
226		if (!data->show_tdie)
227			return 0;
228		break;
229	}
230	return attr->mode;
231}
232
233static struct attribute *k10temp_attrs[] = {
234	&dev_attr_temp1_input.attr,
235	&dev_attr_temp1_max.attr,
236	&sensor_dev_attr_temp1_crit.dev_attr.attr,
237	&sensor_dev_attr_temp1_crit_hyst.dev_attr.attr,
238	&sensor_dev_attr_temp1_label.dev_attr.attr,
239	&dev_attr_temp2_input.attr,
240	&sensor_dev_attr_temp2_label.dev_attr.attr,
241	NULL
242};
243
244static const struct attribute_group k10temp_group = {
245	.attrs = k10temp_attrs,
246	.is_visible = k10temp_is_visible,
247};
248__ATTRIBUTE_GROUPS(k10temp);
249
250static bool has_erratum_319(struct pci_dev *pdev)
251{
252	u32 pkg_type, reg_dram_cfg;
253
254	if (boot_cpu_data.x86 != 0x10)
255		return false;
256
257	/*
258	 * Erratum 319: The thermal sensor of Socket F/AM2+ processors
259	 *              may be unreliable.
260	 */
261	pkg_type = cpuid_ebx(0x80000001) & CPUID_PKGTYPE_MASK;
262	if (pkg_type == CPUID_PKGTYPE_F)
263		return true;
264	if (pkg_type != CPUID_PKGTYPE_AM2R2_AM3)
265		return false;
266
267	/* DDR3 memory implies socket AM3, which is good */
268	pci_bus_read_config_dword(pdev->bus,
269				  PCI_DEVFN(PCI_SLOT(pdev->devfn), 2),
270				  REG_DCT0_CONFIG_HIGH, &reg_dram_cfg);
271	if (reg_dram_cfg & DDR3_MODE)
272		return false;
273
274	/*
275	 * Unfortunately it is possible to run a socket AM3 CPU with DDR2
276	 * memory. We blacklist all the cores which do exist in socket AM2+
277	 * format. It still isn't perfect, as RB-C2 cores exist in both AM2+
278	 * and AM3 formats, but that's the best we can do.
279	 */
280	return boot_cpu_data.x86_model < 4 ||
281	       (boot_cpu_data.x86_model == 4 && boot_cpu_data.x86_stepping <= 2);
282}
283
284static int k10temp_probe(struct pci_dev *pdev,
285				   const struct pci_device_id *id)
286{
287	int unreliable = has_erratum_319(pdev);
288	struct device *dev = &pdev->dev;
289	struct k10temp_data *data;
290	struct device *hwmon_dev;
291	int i;
292
293	if (unreliable) {
294		if (!force) {
295			dev_err(dev,
296				"unreliable CPU thermal sensor; monitoring disabled\n");
297			return -ENODEV;
298		}
299		dev_warn(dev,
300			 "unreliable CPU thermal sensor; check erratum 319\n");
301	}
302
303	data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
304	if (!data)
305		return -ENOMEM;
306
307	data->pdev = pdev;
308
309	if (boot_cpu_data.x86 == 0x15 &&
310	    ((boot_cpu_data.x86_model & 0xf0) == 0x60 ||
311	     (boot_cpu_data.x86_model & 0xf0) == 0x70)) {
312		data->read_htcreg = read_htcreg_nb_f15;
313		data->read_tempreg = read_tempreg_nb_f15;
314	} else if (boot_cpu_data.x86 == 0x17 || boot_cpu_data.x86 == 0x18) {
315		data->temp_adjust_mask = 0x80000;
316		data->read_tempreg = read_tempreg_nb_f17;
317		data->show_tdie = true;
318	} else {
319		data->read_htcreg = read_htcreg_pci;
320		data->read_tempreg = read_tempreg_pci;
321	}
322
323	for (i = 0; i < ARRAY_SIZE(tctl_offset_table); i++) {
324		const struct tctl_offset *entry = &tctl_offset_table[i];
325
326		if (boot_cpu_data.x86 == entry->model &&
327		    strstr(boot_cpu_data.x86_model_id, entry->id)) {
328			data->temp_offset = entry->offset;
329			break;
330		}
331	}
332
333	hwmon_dev = devm_hwmon_device_register_with_groups(dev, "k10temp", data,
334							   k10temp_groups);
335	return PTR_ERR_OR_ZERO(hwmon_dev);
336}
337
338static const struct pci_device_id k10temp_id_table[] = {
339	{ PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_10H_NB_MISC) },
340	{ PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_11H_NB_MISC) },
341	{ PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_CNB17H_F3) },
342	{ PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_15H_NB_F3) },
343	{ PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_15H_M10H_F3) },
344	{ PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_15H_M30H_NB_F3) },
345	{ PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_15H_M60H_NB_F3) },
346	{ PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_15H_M70H_NB_F3) },
347	{ PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_16H_NB_F3) },
348	{ PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_16H_M30H_NB_F3) },
349	{ PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_17H_DF_F3) },
350	{ PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_17H_M10H_DF_F3) },
351	{ PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_17H_M30H_DF_F3) },
352	{ PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_17H_M70H_DF_F3) },
353	{ PCI_VDEVICE(HYGON, PCI_DEVICE_ID_AMD_17H_DF_F3) },
354	{}
355};
356MODULE_DEVICE_TABLE(pci, k10temp_id_table);
357
358static struct pci_driver k10temp_driver = {
359	.name = "k10temp",
360	.id_table = k10temp_id_table,
361	.probe = k10temp_probe,
362};
363
364module_pci_driver(k10temp_driver);