/*
 * k10temp.c - AMD Family 10h/11h/12h/14h/15h processor hardware monitoring
 *
 * Copyright (c) 2009 Clemens Ladisch <clemens@ladisch.de>
 *
 *
 * This driver is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This driver is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
 * See the GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this driver; if not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/err.h>
#include <linux/hwmon.h>
#include <linux/hwmon-sysfs.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <asm/processor.h>

MODULE_DESCRIPTION("AMD Family 10h+ CPU core temperature monitor");
MODULE_AUTHOR("Clemens Ladisch <clemens@ladisch.de>");
MODULE_LICENSE("GPL");

static bool force;
module_param(force, bool, 0444);
MODULE_PARM_DESC(force, "force loading on processors with erratum 319");

/* CPUID function 0x80000001, ebx */
#define CPUID_PKGTYPE_MASK	0xf0000000
#define CPUID_PKGTYPE_F		0x00000000
#define CPUID_PKGTYPE_AM2R2_AM3	0x10000000

/* DRAM controller (PCI function 2) */
#define REG_DCT0_CONFIG_HIGH		0x094
#define DDR3_MODE			0x00000100

/* miscellaneous (PCI function 3) */
#define REG_HARDWARE_THERMAL_CONTROL	0x64
#define HTC_ENABLE			0x00000001

#define REG_REPORTED_TEMPERATURE	0xa4

#define REG_NORTHBRIDGE_CAPABILITIES	0xe8
#define NB_CAP_HTC			0x00000400

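/*
 * CurTmp lives in bits 31:21 of the Reported Temperature register, in
 * units of 0.125 degrees Celsius, so (regval >> 21) * 125 yields the
 * millidegree value the hwmon sysfs ABI expects.
 */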
static ssize_t show_temp(struct device *dev,
			 struct device_attribute *attr, char *buf)
{
	u32 regval;

	pci_read_config_dword(to_pci_dev(dev),
			      REG_REPORTED_TEMPERATURE, &regval);
	return sprintf(buf, "%u\n", (regval >> 21) * 125);
}

static ssize_t show_temp_max(struct device *dev,
			     struct device_attribute *attr, char *buf)
{
	return sprintf(buf, "%d\n", 70 * 1000);
}

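/*
 * The HTC temperature limit is encoded in bits 22:16 of the Hardware
 * Thermal Control register in 0.5 degree steps above a 52 degree base;
 * the hysteresis in bits 27:24 uses the same 0.5 degree granularity.
 */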
static ssize_t show_temp_crit(struct device *dev,
			      struct device_attribute *devattr, char *buf)
{
	struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
	int show_hyst = attr->index;
	u32 regval;
	int value;

	pci_read_config_dword(to_pci_dev(dev),
			      REG_HARDWARE_THERMAL_CONTROL, &regval);
	value = ((regval >> 16) & 0x7f) * 500 + 52000;
	if (show_hyst)
		value -= ((regval >> 24) & 0xf) * 500;
	return sprintf(buf, "%d\n", value);
}

static ssize_t show_name(struct device *dev,
			 struct device_attribute *attr, char *buf)
{
	return sprintf(buf, "k10temp\n");
}

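/*
 * The attributes below are created directly on the PCI device; the hwmon
 * class device registered in k10temp_probe() has that device as parent,
 * which is where libsensors-era userspace finds the files.
 */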
static DEVICE_ATTR(temp1_input, S_IRUGO, show_temp, NULL);
static DEVICE_ATTR(temp1_max, S_IRUGO, show_temp_max, NULL);
static SENSOR_DEVICE_ATTR(temp1_crit, S_IRUGO, show_temp_crit, NULL, 0);
static SENSOR_DEVICE_ATTR(temp1_crit_hyst, S_IRUGO, show_temp_crit, NULL, 1);
static DEVICE_ATTR(name, S_IRUGO, show_name, NULL);

static bool __devinit has_erratum_319(struct pci_dev *pdev)
{
	u32 pkg_type, reg_dram_cfg;

	if (boot_cpu_data.x86 != 0x10)
		return false;

	/*
	 * Erratum 319: The thermal sensor of Socket F/AM2+ processors
	 * may be unreliable.
	 */
	pkg_type = cpuid_ebx(0x80000001) & CPUID_PKGTYPE_MASK;
	if (pkg_type == CPUID_PKGTYPE_F)
		return true;
	if (pkg_type != CPUID_PKGTYPE_AM2R2_AM3)
		return false;

	/* DDR3 memory implies socket AM3, which is good */
	pci_bus_read_config_dword(pdev->bus,
				  PCI_DEVFN(PCI_SLOT(pdev->devfn), 2),
				  REG_DCT0_CONFIG_HIGH, &reg_dram_cfg);
	if (reg_dram_cfg & DDR3_MODE)
		return false;

	/*
	 * Unfortunately it is possible to run a socket AM3 CPU with DDR2
	 * memory. We blacklist all the cores which do exist in socket AM2+
	 * format. It still isn't perfect, as RB-C2 cores exist in both AM2+
	 * and AM3 formats, but that's the best we can do.
	 */
	return boot_cpu_data.x86_model < 4 ||
	       (boot_cpu_data.x86_model == 4 && boot_cpu_data.x86_mask <= 2);
}

static int __devinit k10temp_probe(struct pci_dev *pdev,
				   const struct pci_device_id *id)
{
	struct device *hwmon_dev;
	u32 reg_caps, reg_htc;
	int unreliable = has_erratum_319(pdev);
	int err;

	if (unreliable && !force) {
		dev_err(&pdev->dev,
			"unreliable CPU thermal sensor; monitoring disabled\n");
		err = -ENODEV;
		goto exit;
	}

	err = device_create_file(&pdev->dev, &dev_attr_temp1_input);
	if (err)
		goto exit;
	err = device_create_file(&pdev->dev, &dev_attr_temp1_max);
	if (err)
		goto exit_remove;

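	/*
	 * temp1_crit and temp1_crit_hyst are only meaningful when the
	 * northbridge implements HTC and the BIOS has enabled it.
	 */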
	pci_read_config_dword(pdev, REG_NORTHBRIDGE_CAPABILITIES, &reg_caps);
	pci_read_config_dword(pdev, REG_HARDWARE_THERMAL_CONTROL, &reg_htc);
	if ((reg_caps & NB_CAP_HTC) && (reg_htc & HTC_ENABLE)) {
		err = device_create_file(&pdev->dev,
					 &sensor_dev_attr_temp1_crit.dev_attr);
		if (err)
			goto exit_remove;
		err = device_create_file(&pdev->dev,
					 &sensor_dev_attr_temp1_crit_hyst.dev_attr);
		if (err)
			goto exit_remove;
	}

	err = device_create_file(&pdev->dev, &dev_attr_name);
	if (err)
		goto exit_remove;

	hwmon_dev = hwmon_device_register(&pdev->dev);
	if (IS_ERR(hwmon_dev)) {
		err = PTR_ERR(hwmon_dev);
		goto exit_remove;
	}
	pci_set_drvdata(pdev, hwmon_dev);

	if (unreliable && force)
		dev_warn(&pdev->dev,
			 "unreliable CPU thermal sensor; check erratum 319\n");
	return 0;

exit_remove:
	device_remove_file(&pdev->dev, &dev_attr_name);
	device_remove_file(&pdev->dev, &dev_attr_temp1_input);
	device_remove_file(&pdev->dev, &dev_attr_temp1_max);
	device_remove_file(&pdev->dev,
			   &sensor_dev_attr_temp1_crit.dev_attr);
	device_remove_file(&pdev->dev,
			   &sensor_dev_attr_temp1_crit_hyst.dev_attr);
exit:
	return err;
}

static void __devexit k10temp_remove(struct pci_dev *pdev)
{
	hwmon_device_unregister(pci_get_drvdata(pdev));
	device_remove_file(&pdev->dev, &dev_attr_name);
	device_remove_file(&pdev->dev, &dev_attr_temp1_input);
	device_remove_file(&pdev->dev, &dev_attr_temp1_max);
	device_remove_file(&pdev->dev,
			   &sensor_dev_attr_temp1_crit.dev_attr);
	device_remove_file(&pdev->dev,
			   &sensor_dev_attr_temp1_crit_hyst.dev_attr);
	pci_set_drvdata(pdev, NULL);
}

static const struct pci_device_id k10temp_id_table[] = {
	{ PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_10H_NB_MISC) },
	{ PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_11H_NB_MISC) },
	{ PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_CNB17H_F3) },
	{ PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_15H_NB_F3) },
	{}
};
MODULE_DEVICE_TABLE(pci, k10temp_id_table);

static struct pci_driver k10temp_driver = {
	.name = "k10temp",
	.id_table = k10temp_id_table,
	.probe = k10temp_probe,
	.remove = __devexit_p(k10temp_remove),
};

static int __init k10temp_init(void)
{
	return pci_register_driver(&k10temp_driver);
}

static void __exit k10temp_exit(void)
{
	pci_unregister_driver(&k10temp_driver);
}

module_init(k10temp_init)
module_exit(k10temp_exit)
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * k10temp.c - AMD Family 10h/11h/12h/14h/15h/16h processor hardware monitoring
 *
 * Copyright (c) 2009 Clemens Ladisch <clemens@ladisch.de>
 */

#include <linux/err.h>
#include <linux/hwmon.h>
#include <linux/hwmon-sysfs.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/pci_ids.h>
#include <asm/amd_nb.h>
#include <asm/processor.h>

MODULE_DESCRIPTION("AMD Family 10h+ CPU core temperature monitor");
MODULE_AUTHOR("Clemens Ladisch <clemens@ladisch.de>");
MODULE_LICENSE("GPL");

static bool force;
module_param(force, bool, 0444);
MODULE_PARM_DESC(force, "force loading on processors with erratum 319");

/* Provide lock for writing to NB_SMU_IND_ADDR */
static DEFINE_MUTEX(nb_smu_ind_mutex);

#ifndef PCI_DEVICE_ID_AMD_15H_M70H_NB_F3
#define PCI_DEVICE_ID_AMD_15H_M70H_NB_F3	0x15b3
#endif

/* CPUID function 0x80000001, ebx */
#define CPUID_PKGTYPE_MASK	0xf0000000
#define CPUID_PKGTYPE_F		0x00000000
#define CPUID_PKGTYPE_AM2R2_AM3	0x10000000

/* DRAM controller (PCI function 2) */
#define REG_DCT0_CONFIG_HIGH		0x094
#define DDR3_MODE			0x00000100

/* miscellaneous (PCI function 3) */
#define REG_HARDWARE_THERMAL_CONTROL	0x64
#define HTC_ENABLE			0x00000001

#define REG_REPORTED_TEMPERATURE	0xa4

#define REG_NORTHBRIDGE_CAPABILITIES	0xe8
#define NB_CAP_HTC			0x00000400

/*
 * For F15h M60h and M70h, REG_HARDWARE_THERMAL_CONTROL
 * and REG_REPORTED_TEMPERATURE have been moved to
 * D0F0xBC_xD820_0C64 [Hardware Temperature Control]
 * D0F0xBC_xD820_0CA4 [Reported Temperature Control]
 */
#define F15H_M60H_HARDWARE_TEMP_CTRL_OFFSET	0xd8200c64
#define F15H_M60H_REPORTED_TEMP_CTRL_OFFSET	0xd8200ca4

/* F17h M01h Access through SMN */
#define F17H_M01H_REPORTED_TEMP_CTRL_OFFSET	0x00059800

struct k10temp_data {
	struct pci_dev *pdev;
	void (*read_htcreg)(struct pci_dev *pdev, u32 *regval);
	void (*read_tempreg)(struct pci_dev *pdev, u32 *regval);
	int temp_offset;
	u32 temp_adjust_mask;
	bool show_tdie;
};

struct tctl_offset {
	u8 model;
	char const *id;
	int offset;
};

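/*
 * Some Ryzen parts report Tctl with a fixed offset above the die
 * temperature; the table below lists the offset (in millidegrees)
 * that is subtracted again to obtain Tdie.
 */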
static const struct tctl_offset tctl_offset_table[] = {
	{ 0x17, "AMD Ryzen 5 1600X", 20000 },
	{ 0x17, "AMD Ryzen 7 1700X", 20000 },
	{ 0x17, "AMD Ryzen 7 1800X", 20000 },
	{ 0x17, "AMD Ryzen 7 2700X", 10000 },
	{ 0x17, "AMD Ryzen Threadripper 19", 27000 }, /* 19{00,20,50}X */
	{ 0x17, "AMD Ryzen Threadripper 29", 27000 }, /* 29{20,50,70,90}[W]X */
};

static void read_htcreg_pci(struct pci_dev *pdev, u32 *regval)
{
	pci_read_config_dword(pdev, REG_HARDWARE_THERMAL_CONTROL, regval);
}

static void read_tempreg_pci(struct pci_dev *pdev, u32 *regval)
{
	pci_read_config_dword(pdev, REG_REPORTED_TEMPERATURE, regval);
}

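/*
 * Indirect access through the SMU index/data pair in D0F0 config space:
 * the register offset is written to the index register at "base" (0xb8
 * below) and the value is then read back from the data register at
 * base + 4.  The mutex serializes the two-step sequence.
 */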
static void amd_nb_index_read(struct pci_dev *pdev, unsigned int devfn,
			      unsigned int base, int offset, u32 *val)
{
	mutex_lock(&nb_smu_ind_mutex);
	pci_bus_write_config_dword(pdev->bus, devfn,
				   base, offset);
	pci_bus_read_config_dword(pdev->bus, devfn,
				  base + 4, val);
	mutex_unlock(&nb_smu_ind_mutex);
}

static void read_htcreg_nb_f15(struct pci_dev *pdev, u32 *regval)
{
	amd_nb_index_read(pdev, PCI_DEVFN(0, 0), 0xb8,
			  F15H_M60H_HARDWARE_TEMP_CTRL_OFFSET, regval);
}

static void read_tempreg_nb_f15(struct pci_dev *pdev, u32 *regval)
{
	amd_nb_index_read(pdev, PCI_DEVFN(0, 0), 0xb8,
			  F15H_M60H_REPORTED_TEMP_CTRL_OFFSET, regval);
}

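/*
 * On Family 17h the register sits behind the System Management Network;
 * amd_smn_read() from <asm/amd_nb.h> performs the indirect read via the
 * northbridge/data-fabric device of the node this PCI device belongs to.
 */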
static void read_tempreg_nb_f17(struct pci_dev *pdev, u32 *regval)
{
	amd_smn_read(amd_pci_dev_to_node_id(pdev),
		     F17H_M01H_REPORTED_TEMP_CTRL_OFFSET, regval);
}

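/*
 * CurTmp is in bits 31:21 in 0.125 degree steps.  On Family 17h the bit
 * selected by temp_adjust_mask flags the range-select mode, in which the
 * reported value is offset by 49 degrees and must be adjusted back down.
 */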
static unsigned int get_raw_temp(struct k10temp_data *data)
{
	unsigned int temp;
	u32 regval;

	data->read_tempreg(data->pdev, &regval);
	temp = (regval >> 21) * 125;
	if (regval & data->temp_adjust_mask)
		temp -= 49000;
	return temp;
}

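/*
 * temp1 is Tdie: the raw Tctl value with the per-model offset from
 * tctl_offset_table subtracted (clamped at 0).  temp2, exposed only
 * when show_tdie is set, is the unmodified Tctl value.
 */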
static ssize_t temp1_input_show(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	struct k10temp_data *data = dev_get_drvdata(dev);
	unsigned int temp = get_raw_temp(data);

	if (temp > data->temp_offset)
		temp -= data->temp_offset;
	else
		temp = 0;

	return sprintf(buf, "%u\n", temp);
}

static ssize_t temp2_input_show(struct device *dev,
				struct device_attribute *devattr, char *buf)
{
	struct k10temp_data *data = dev_get_drvdata(dev);
	unsigned int temp = get_raw_temp(data);

	return sprintf(buf, "%u\n", temp);
}

static ssize_t temp_label_show(struct device *dev,
			       struct device_attribute *devattr, char *buf)
{
	struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);

	return sprintf(buf, "%s\n", attr->index ? "Tctl" : "Tdie");
}

static ssize_t temp1_max_show(struct device *dev,
			      struct device_attribute *attr, char *buf)
{
	return sprintf(buf, "%d\n", 70 * 1000);
}

static ssize_t temp_crit_show(struct device *dev,
			      struct device_attribute *devattr, char *buf)
{
	struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
	struct k10temp_data *data = dev_get_drvdata(dev);
	int show_hyst = attr->index;
	u32 regval;
	int value;

	data->read_htcreg(data->pdev, &regval);
	value = ((regval >> 16) & 0x7f) * 500 + 52000;
	if (show_hyst)
		value -= ((regval >> 24) & 0xf) * 500;
	return sprintf(buf, "%d\n", value);
}

static DEVICE_ATTR_RO(temp1_input);
static DEVICE_ATTR_RO(temp1_max);
static SENSOR_DEVICE_ATTR_RO(temp1_crit, temp_crit, 0);
static SENSOR_DEVICE_ATTR_RO(temp1_crit_hyst, temp_crit, 1);

static SENSOR_DEVICE_ATTR_RO(temp1_label, temp_label, 0);
static DEVICE_ATTR_RO(temp2_input);
static SENSOR_DEVICE_ATTR_RO(temp2_label, temp_label, 1);

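/*
 * "index" is the position of the attribute in k10temp_attrs[]: the HTC
 * limits are hidden when HTC is unsupported or disabled, and the
 * Tdie/Tctl attributes are only shown where show_tdie is set.
 */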
static umode_t k10temp_is_visible(struct kobject *kobj,
				  struct attribute *attr, int index)
{
	struct device *dev = container_of(kobj, struct device, kobj);
	struct k10temp_data *data = dev_get_drvdata(dev);
	struct pci_dev *pdev = data->pdev;
	u32 reg;

	switch (index) {
	case 0 ... 1:	/* temp1_input, temp1_max */
	default:
		break;
	case 2 ... 3:	/* temp1_crit, temp1_crit_hyst */
		if (!data->read_htcreg)
			return 0;

		pci_read_config_dword(pdev, REG_NORTHBRIDGE_CAPABILITIES,
				      &reg);
		if (!(reg & NB_CAP_HTC))
			return 0;

		data->read_htcreg(data->pdev, &reg);
		if (!(reg & HTC_ENABLE))
			return 0;
		break;
	case 4 ... 6:	/* temp1_label, temp2_input, temp2_label */
		if (!data->show_tdie)
			return 0;
		break;
	}
	return attr->mode;
}

static struct attribute *k10temp_attrs[] = {
	&dev_attr_temp1_input.attr,
	&dev_attr_temp1_max.attr,
	&sensor_dev_attr_temp1_crit.dev_attr.attr,
	&sensor_dev_attr_temp1_crit_hyst.dev_attr.attr,
	&sensor_dev_attr_temp1_label.dev_attr.attr,
	&dev_attr_temp2_input.attr,
	&sensor_dev_attr_temp2_label.dev_attr.attr,
	NULL
};

static const struct attribute_group k10temp_group = {
	.attrs = k10temp_attrs,
	.is_visible = k10temp_is_visible,
};
__ATTRIBUTE_GROUPS(k10temp);

static bool has_erratum_319(struct pci_dev *pdev)
{
	u32 pkg_type, reg_dram_cfg;

	if (boot_cpu_data.x86 != 0x10)
		return false;

	/*
	 * Erratum 319: The thermal sensor of Socket F/AM2+ processors
	 * may be unreliable.
	 */
	pkg_type = cpuid_ebx(0x80000001) & CPUID_PKGTYPE_MASK;
	if (pkg_type == CPUID_PKGTYPE_F)
		return true;
	if (pkg_type != CPUID_PKGTYPE_AM2R2_AM3)
		return false;

	/* DDR3 memory implies socket AM3, which is good */
	pci_bus_read_config_dword(pdev->bus,
				  PCI_DEVFN(PCI_SLOT(pdev->devfn), 2),
				  REG_DCT0_CONFIG_HIGH, &reg_dram_cfg);
	if (reg_dram_cfg & DDR3_MODE)
		return false;

	/*
	 * Unfortunately it is possible to run a socket AM3 CPU with DDR2
	 * memory. We blacklist all the cores which do exist in socket AM2+
	 * format. It still isn't perfect, as RB-C2 cores exist in both AM2+
	 * and AM3 formats, but that's the best we can do.
	 */
	return boot_cpu_data.x86_model < 4 ||
	       (boot_cpu_data.x86_model == 4 && boot_cpu_data.x86_stepping <= 2);
}

static int k10temp_probe(struct pci_dev *pdev,
			 const struct pci_device_id *id)
{
	int unreliable = has_erratum_319(pdev);
	struct device *dev = &pdev->dev;
	struct k10temp_data *data;
	struct device *hwmon_dev;
	int i;

	if (unreliable) {
		if (!force) {
			dev_err(dev,
				"unreliable CPU thermal sensor; monitoring disabled\n");
			return -ENODEV;
		}
		dev_warn(dev,
			 "unreliable CPU thermal sensor; check erratum 319\n");
	}

	data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	data->pdev = pdev;

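	/*
	 * Pick the register access method: F15h models 60h-7fh use the
	 * SMU index/data pair, F17h (and the Hygon Dhyana F18h derivative)
	 * go through the SMN, and everything else reads the registers
	 * directly from the PCI config space of function 3.
	 */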
	if (boot_cpu_data.x86 == 0x15 &&
	    ((boot_cpu_data.x86_model & 0xf0) == 0x60 ||
	     (boot_cpu_data.x86_model & 0xf0) == 0x70)) {
		data->read_htcreg = read_htcreg_nb_f15;
		data->read_tempreg = read_tempreg_nb_f15;
	} else if (boot_cpu_data.x86 == 0x17 || boot_cpu_data.x86 == 0x18) {
		data->temp_adjust_mask = 0x80000;
		data->read_tempreg = read_tempreg_nb_f17;
		data->show_tdie = true;
	} else {
		data->read_htcreg = read_htcreg_pci;
		data->read_tempreg = read_tempreg_pci;
	}

	for (i = 0; i < ARRAY_SIZE(tctl_offset_table); i++) {
		const struct tctl_offset *entry = &tctl_offset_table[i];

		if (boot_cpu_data.x86 == entry->model &&
		    strstr(boot_cpu_data.x86_model_id, entry->id)) {
			data->temp_offset = entry->offset;
			break;
		}
	}

	hwmon_dev = devm_hwmon_device_register_with_groups(dev, "k10temp", data,
							   k10temp_groups);
	return PTR_ERR_OR_ZERO(hwmon_dev);
}

static const struct pci_device_id k10temp_id_table[] = {
	{ PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_10H_NB_MISC) },
	{ PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_11H_NB_MISC) },
	{ PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_CNB17H_F3) },
	{ PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_15H_NB_F3) },
	{ PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_15H_M10H_F3) },
	{ PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_15H_M30H_NB_F3) },
	{ PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_15H_M60H_NB_F3) },
	{ PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_15H_M70H_NB_F3) },
	{ PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_16H_NB_F3) },
	{ PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_16H_M30H_NB_F3) },
	{ PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_17H_DF_F3) },
	{ PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_17H_M10H_DF_F3) },
	{ PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_17H_M30H_DF_F3) },
	{ PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_17H_M70H_DF_F3) },
	{ PCI_VDEVICE(HYGON, PCI_DEVICE_ID_AMD_17H_DF_F3) },
	{}
};
MODULE_DEVICE_TABLE(pci, k10temp_id_table);

static struct pci_driver k10temp_driver = {
	.name = "k10temp",
	.id_table = k10temp_id_table,
	.probe = k10temp_probe,
};

module_pci_driver(k10temp_driver);