/*
 * NOTE: scraped-page boilerplate removed. The following is
 * drivers/fpga/dfl-fme-main.c as shipped in kernel v6.2.
 */
  1// SPDX-License-Identifier: GPL-2.0
  2/*
  3 * Driver for FPGA Management Engine (FME)
  4 *
  5 * Copyright (C) 2017-2018 Intel Corporation, Inc.
  6 *
  7 * Authors:
  8 *   Kang Luwei <luwei.kang@intel.com>
  9 *   Xiao Guangrong <guangrong.xiao@linux.intel.com>
 10 *   Joseph Grecco <joe.grecco@intel.com>
 11 *   Enno Luebbers <enno.luebbers@intel.com>
 12 *   Tim Whisonant <tim.whisonant@intel.com>
 13 *   Ananda Ravuri <ananda.ravuri@intel.com>
 14 *   Henry Mitchel <henry.mitchel@intel.com>
 15 */
 16
 17#include <linux/hwmon.h>
 18#include <linux/hwmon-sysfs.h>
 19#include <linux/kernel.h>
 20#include <linux/module.h>
 21#include <linux/uaccess.h>
 
 22#include <linux/fpga-dfl.h>
 23
 24#include "dfl.h"
 25#include "dfl-fme.h"
 26
 27static ssize_t ports_num_show(struct device *dev,
 28			      struct device_attribute *attr, char *buf)
 29{
 30	void __iomem *base;
 31	u64 v;
 32
 33	base = dfl_get_feature_ioaddr_by_id(dev, FME_FEATURE_ID_HEADER);
 34
 35	v = readq(base + FME_HDR_CAP);
 36
 37	return scnprintf(buf, PAGE_SIZE, "%u\n",
 38			 (unsigned int)FIELD_GET(FME_CAP_NUM_PORTS, v));
 39}
 40static DEVICE_ATTR_RO(ports_num);
 41
 42/*
 43 * Bitstream (static FPGA region) identifier number. It contains the
 44 * detailed version and other information of this static FPGA region.
 45 */
 46static ssize_t bitstream_id_show(struct device *dev,
 47				 struct device_attribute *attr, char *buf)
 48{
 49	void __iomem *base;
 50	u64 v;
 51
 52	base = dfl_get_feature_ioaddr_by_id(dev, FME_FEATURE_ID_HEADER);
 53
 54	v = readq(base + FME_HDR_BITSTREAM_ID);
 55
 56	return scnprintf(buf, PAGE_SIZE, "0x%llx\n", (unsigned long long)v);
 57}
 58static DEVICE_ATTR_RO(bitstream_id);
 59
 60/*
 61 * Bitstream (static FPGA region) meta data. It contains the synthesis
 62 * date, seed and other information of this static FPGA region.
 63 */
 64static ssize_t bitstream_metadata_show(struct device *dev,
 65				       struct device_attribute *attr, char *buf)
 66{
 67	void __iomem *base;
 68	u64 v;
 69
 70	base = dfl_get_feature_ioaddr_by_id(dev, FME_FEATURE_ID_HEADER);
 71
 72	v = readq(base + FME_HDR_BITSTREAM_MD);
 73
 74	return scnprintf(buf, PAGE_SIZE, "0x%llx\n", (unsigned long long)v);
 75}
 76static DEVICE_ATTR_RO(bitstream_metadata);
 77
 78static ssize_t cache_size_show(struct device *dev,
 79			       struct device_attribute *attr, char *buf)
 80{
 81	void __iomem *base;
 82	u64 v;
 83
 84	base = dfl_get_feature_ioaddr_by_id(dev, FME_FEATURE_ID_HEADER);
 85
 86	v = readq(base + FME_HDR_CAP);
 87
 88	return sprintf(buf, "%u\n",
 89		       (unsigned int)FIELD_GET(FME_CAP_CACHE_SIZE, v));
 90}
 91static DEVICE_ATTR_RO(cache_size);
 92
 93static ssize_t fabric_version_show(struct device *dev,
 94				   struct device_attribute *attr, char *buf)
 95{
 96	void __iomem *base;
 97	u64 v;
 98
 99	base = dfl_get_feature_ioaddr_by_id(dev, FME_FEATURE_ID_HEADER);
100
101	v = readq(base + FME_HDR_CAP);
102
103	return sprintf(buf, "%u\n",
104		       (unsigned int)FIELD_GET(FME_CAP_FABRIC_VERID, v));
105}
106static DEVICE_ATTR_RO(fabric_version);
107
108static ssize_t socket_id_show(struct device *dev,
109			      struct device_attribute *attr, char *buf)
110{
111	void __iomem *base;
112	u64 v;
113
114	base = dfl_get_feature_ioaddr_by_id(dev, FME_FEATURE_ID_HEADER);
115
116	v = readq(base + FME_HDR_CAP);
117
118	return sprintf(buf, "%u\n",
119		       (unsigned int)FIELD_GET(FME_CAP_SOCKET_ID, v));
120}
121static DEVICE_ATTR_RO(socket_id);
122
/* Read-only sysfs attributes exposed via the FME header feature. */
static struct attribute *fme_hdr_attrs[] = {
	&dev_attr_ports_num.attr,
	&dev_attr_bitstream_id.attr,
	&dev_attr_bitstream_metadata.attr,
	&dev_attr_cache_size.attr,
	&dev_attr_fabric_version.attr,
	&dev_attr_socket_id.attr,
	NULL,
};

/* Group attached at device level (see fme_dev_groups below). */
static const struct attribute_group fme_hdr_group = {
	.attrs = fme_hdr_attrs,
};
136
137static long fme_hdr_ioctl_release_port(struct dfl_feature_platform_data *pdata,
138				       unsigned long arg)
139{
140	struct dfl_fpga_cdev *cdev = pdata->dfl_cdev;
141	int port_id;
142
143	if (get_user(port_id, (int __user *)arg))
144		return -EFAULT;
145
146	return dfl_fpga_cdev_release_port(cdev, port_id);
147}
148
149static long fme_hdr_ioctl_assign_port(struct dfl_feature_platform_data *pdata,
150				      unsigned long arg)
151{
152	struct dfl_fpga_cdev *cdev = pdata->dfl_cdev;
153	int port_id;
154
155	if (get_user(port_id, (int __user *)arg))
156		return -EFAULT;
157
158	return dfl_fpga_cdev_assign_port(cdev, port_id);
159}
160
161static long fme_hdr_ioctl(struct platform_device *pdev,
162			  struct dfl_feature *feature,
163			  unsigned int cmd, unsigned long arg)
164{
165	struct dfl_feature_platform_data *pdata = dev_get_platdata(&pdev->dev);
166
167	switch (cmd) {
168	case DFL_FPGA_FME_PORT_RELEASE:
169		return fme_hdr_ioctl_release_port(pdata, arg);
170	case DFL_FPGA_FME_PORT_ASSIGN:
171		return fme_hdr_ioctl_assign_port(pdata, arg);
172	}
173
174	return -ENODEV;
175}
176
/* Feature-id match table: this sub-driver binds to the FME header. */
static const struct dfl_feature_id fme_hdr_id_table[] = {
	{.id = FME_FEATURE_ID_HEADER,},
	{0,}
};

/* Header sub-feature only implements ioctl (port release/assign). */
static const struct dfl_feature_ops fme_hdr_ops = {
	.ioctl = fme_hdr_ioctl,
};
185
/* FME thermal management feature: register offsets and bit fields. */
#define FME_THERM_THRESHOLD	0x8
#define TEMP_THRESHOLD1		GENMASK_ULL(6, 0)	/* degrees Celsius */
#define TEMP_THRESHOLD1_EN	BIT_ULL(7)
#define TEMP_THRESHOLD2		GENMASK_ULL(14, 8)	/* degrees Celsius */
#define TEMP_THRESHOLD2_EN	BIT_ULL(15)
#define TRIP_THRESHOLD		GENMASK_ULL(30, 24)	/* shutdown trip point */
#define TEMP_THRESHOLD1_STATUS	BIT_ULL(32)		/* threshold1 reached */
#define TEMP_THRESHOLD2_STATUS	BIT_ULL(33)		/* threshold2 reached */
/* threshold1 policy: 0 - AP2 (90% throttle) / 1 - AP1 (50% throttle) */
#define TEMP_THRESHOLD1_POLICY	BIT_ULL(44)

#define FME_THERM_RDSENSOR_FMT1	0x10
#define FPGA_TEMPERATURE	GENMASK_ULL(6, 0)	/* degrees Celsius */

#define FME_THERM_CAP		0x20
#define THERM_NO_THROTTLE	BIT_ULL(0)	/* set: no automatic throttling */

/* NOTE(review): empty macro with no users in this file — confirm and drop. */
#define MD_PRE_DEG
204
205static bool fme_thermal_throttle_support(void __iomem *base)
206{
207	u64 v = readq(base + FME_THERM_CAP);
208
209	return FIELD_GET(THERM_NO_THROTTLE, v) ? false : true;
210}
211
212static umode_t thermal_hwmon_attrs_visible(const void *drvdata,
213					   enum hwmon_sensor_types type,
214					   u32 attr, int channel)
215{
216	const struct dfl_feature *feature = drvdata;
217
218	/* temperature is always supported, and check hardware cap for others */
219	if (attr == hwmon_temp_input)
220		return 0444;
221
222	return fme_thermal_throttle_support(feature->ioaddr) ? 0444 : 0;
223}
224
/*
 * thermal_hwmon_read - hwmon read callback for the FME thermal sensor.
 *
 * Hardware reports whole degrees Celsius; hwmon expects millidegrees,
 * hence the * 1000 scaling.  Alarm attributes return raw status bits.
 */
static int thermal_hwmon_read(struct device *dev, enum hwmon_sensor_types type,
			      u32 attr, int channel, long *val)
{
	struct dfl_feature *feature = dev_get_drvdata(dev);
	u64 v;

	switch (attr) {
	case hwmon_temp_input:
		v = readq(feature->ioaddr + FME_THERM_RDSENSOR_FMT1);
		*val = (long)(FIELD_GET(FPGA_TEMPERATURE, v) * 1000);
		break;
	case hwmon_temp_max:
		/* threshold1: triggers 50% or 90% throttling (see policy) */
		v = readq(feature->ioaddr + FME_THERM_THRESHOLD);
		*val = (long)(FIELD_GET(TEMP_THRESHOLD1, v) * 1000);
		break;
	case hwmon_temp_crit:
		/* threshold2: triggers 100% throttling */
		v = readq(feature->ioaddr + FME_THERM_THRESHOLD);
		*val = (long)(FIELD_GET(TEMP_THRESHOLD2, v) * 1000);
		break;
	case hwmon_temp_emergency:
		/* trip threshold: hardware shuts the FPGA down */
		v = readq(feature->ioaddr + FME_THERM_THRESHOLD);
		*val = (long)(FIELD_GET(TRIP_THRESHOLD, v) * 1000);
		break;
	case hwmon_temp_max_alarm:
		v = readq(feature->ioaddr + FME_THERM_THRESHOLD);
		*val = (long)FIELD_GET(TEMP_THRESHOLD1_STATUS, v);
		break;
	case hwmon_temp_crit_alarm:
		v = readq(feature->ioaddr + FME_THERM_THRESHOLD);
		*val = (long)FIELD_GET(TEMP_THRESHOLD2_STATUS, v);
		break;
	default:
		return -EOPNOTSUPP;
	}

	return 0;
}
262
/* Read-only sensor: no .write callback. */
static const struct hwmon_ops thermal_hwmon_ops = {
	.is_visible = thermal_hwmon_attrs_visible,
	.read = thermal_hwmon_read,
};

/* One temperature channel with threshold and alarm attributes. */
static const struct hwmon_channel_info *thermal_hwmon_info[] = {
	HWMON_CHANNEL_INFO(temp, HWMON_T_INPUT | HWMON_T_EMERGENCY |
				 HWMON_T_MAX   | HWMON_T_MAX_ALARM |
				 HWMON_T_CRIT  | HWMON_T_CRIT_ALARM),
	NULL
};

static const struct hwmon_chip_info thermal_hwmon_chip_info = {
	.ops = &thermal_hwmon_ops,
	.info = thermal_hwmon_info,
};
279
280static ssize_t temp1_max_policy_show(struct device *dev,
281				     struct device_attribute *attr, char *buf)
282{
283	struct dfl_feature *feature = dev_get_drvdata(dev);
284	u64 v;
285
286	v = readq(feature->ioaddr + FME_THERM_THRESHOLD);
287
288	return sprintf(buf, "%u\n",
289		       (unsigned int)FIELD_GET(TEMP_THRESHOLD1_POLICY, v));
290}
291
292static DEVICE_ATTR_RO(temp1_max_policy);
293
/* Extra (non-standard hwmon) thermal sysfs attributes. */
static struct attribute *thermal_extra_attrs[] = {
	&dev_attr_temp1_max_policy.attr,
	NULL,
};

/* Only expose the extra attributes when hardware can auto-throttle. */
static umode_t thermal_extra_attrs_visible(struct kobject *kobj,
					   struct attribute *attr, int index)
{
	struct device *dev = kobj_to_dev(kobj);
	struct dfl_feature *feature = dev_get_drvdata(dev);

	return fme_thermal_throttle_support(feature->ioaddr) ? attr->mode : 0;
}

static const struct attribute_group thermal_extra_group = {
	.attrs		= thermal_extra_attrs,
	.is_visible	= thermal_extra_attrs_visible,
};
__ATTRIBUTE_GROUPS(thermal_extra);
313
/*
 * fme_thermal_mgmt_init - register the thermal hwmon device for the FME.
 * @pdev:    FME platform device
 * @feature: thermal management sub-feature
 *
 * Returns 0 on success or the PTR_ERR of the failed hwmon registration.
 */
static int fme_thermal_mgmt_init(struct platform_device *pdev,
				 struct dfl_feature *feature)
{
	struct device *hwmon;

	/*
	 * create hwmon to allow userspace monitoring temperature and other
	 * threshold information.
	 *
	 * temp1_input      -> FPGA device temperature
	 * temp1_max        -> hardware threshold 1 -> 50% or 90% throttling
	 * temp1_crit       -> hardware threshold 2 -> 100% throttling
	 * temp1_emergency  -> hardware trip_threshold to shutdown FPGA
	 * temp1_max_alarm  -> hardware threshold 1 alarm
	 * temp1_crit_alarm -> hardware threshold 2 alarm
	 *
	 * create device specific sysfs interfaces, e.g. read temp1_max_policy
	 * to understand the actual hardware throttling action (50% vs 90%).
	 *
	 * If hardware doesn't support automatic throttling per thresholds,
	 * then all above sysfs interfaces are not visible except temp1_input
	 * for temperature.
	 */
	hwmon = devm_hwmon_device_register_with_info(&pdev->dev,
						     "dfl_fme_thermal", feature,
						     &thermal_hwmon_chip_info,
						     thermal_extra_groups);
	if (IS_ERR(hwmon)) {
		dev_err(&pdev->dev, "Fail to register thermal hwmon\n");
		return PTR_ERR(hwmon);
	}

	return 0;
}
348
/* Feature-id match table: thermal management sub-feature. */
static const struct dfl_feature_id fme_thermal_mgmt_id_table[] = {
	{.id = FME_FEATURE_ID_THERMAL_MGMT,},
	{0,}
};

/* Thermal sub-feature only needs init (hwmon registration is devm-managed). */
static const struct dfl_feature_ops fme_thermal_mgmt_ops = {
	.init = fme_thermal_mgmt_init,
};
357
/* FME power management feature: register offsets and bit fields. */
#define FME_PWR_STATUS		0x8
#define FME_LATENCY_TOLERANCE	BIT_ULL(18)
#define PWR_CONSUMED		GENMASK_ULL(17, 0)	/* in Watts */

#define FME_PWR_THRESHOLD	0x10
#define PWR_THRESHOLD1		GENMASK_ULL(6, 0)	/* in Watts */
#define PWR_THRESHOLD2		GENMASK_ULL(14, 8)	/* in Watts */
#define PWR_THRESHOLD_MAX	0x7f			/* in Watts */
#define PWR_THRESHOLD1_STATUS	BIT_ULL(16)
#define PWR_THRESHOLD2_STATUS	BIT_ULL(17)

#define FME_PWR_XEON_LIMIT	0x18
#define XEON_PWR_LIMIT		GENMASK_ULL(14, 0)	/* in 0.1 Watts */
#define XEON_PWR_EN		BIT_ULL(15)
#define FME_PWR_FPGA_LIMIT	0x20
#define FPGA_PWR_LIMIT		GENMASK_ULL(14, 0)	/* in 0.1 Watts */
#define FPGA_PWR_EN		BIT_ULL(15)
375
/*
 * power_hwmon_read - hwmon read callback for FME power management.
 *
 * Hardware reports whole watts; hwmon expects microwatts, hence the
 * * 1000000 scaling.  Alarm attributes return raw status bits.
 */
static int power_hwmon_read(struct device *dev, enum hwmon_sensor_types type,
			    u32 attr, int channel, long *val)
{
	struct dfl_feature *feature = dev_get_drvdata(dev);
	u64 v;

	switch (attr) {
	case hwmon_power_input:
		v = readq(feature->ioaddr + FME_PWR_STATUS);
		*val = (long)(FIELD_GET(PWR_CONSUMED, v) * 1000000);
		break;
	case hwmon_power_max:
		v = readq(feature->ioaddr + FME_PWR_THRESHOLD);
		*val = (long)(FIELD_GET(PWR_THRESHOLD1, v) * 1000000);
		break;
	case hwmon_power_crit:
		v = readq(feature->ioaddr + FME_PWR_THRESHOLD);
		*val = (long)(FIELD_GET(PWR_THRESHOLD2, v) * 1000000);
		break;
	case hwmon_power_max_alarm:
		v = readq(feature->ioaddr + FME_PWR_THRESHOLD);
		*val = (long)FIELD_GET(PWR_THRESHOLD1_STATUS, v);
		break;
	case hwmon_power_crit_alarm:
		v = readq(feature->ioaddr + FME_PWR_THRESHOLD);
		*val = (long)FIELD_GET(PWR_THRESHOLD2_STATUS, v);
		break;
	default:
		return -EOPNOTSUPP;
	}

	return 0;
}
409
/*
 * power_hwmon_write - set power threshold1 (max) or threshold2 (crit).
 *
 * @val arrives in microwatts (hwmon convention); it is converted to whole
 * watts and clamped to the 7-bit hardware range.  The read-modify-write of
 * the shared threshold register is serialized by pdata->lock.
 */
static int power_hwmon_write(struct device *dev, enum hwmon_sensor_types type,
			     u32 attr, int channel, long val)
{
	struct dfl_feature_platform_data *pdata = dev_get_platdata(dev->parent);
	struct dfl_feature *feature = dev_get_drvdata(dev);
	int ret = 0;
	u64 v;

	/* microwatts -> watts, bounded to what the field can hold */
	val = clamp_val(val / 1000000, 0, PWR_THRESHOLD_MAX);

	mutex_lock(&pdata->lock);

	switch (attr) {
	case hwmon_power_max:
		v = readq(feature->ioaddr + FME_PWR_THRESHOLD);
		v &= ~PWR_THRESHOLD1;
		v |= FIELD_PREP(PWR_THRESHOLD1, val);
		writeq(v, feature->ioaddr + FME_PWR_THRESHOLD);
		break;
	case hwmon_power_crit:
		v = readq(feature->ioaddr + FME_PWR_THRESHOLD);
		v &= ~PWR_THRESHOLD2;
		v |= FIELD_PREP(PWR_THRESHOLD2, val);
		writeq(v, feature->ioaddr + FME_PWR_THRESHOLD);
		break;
	default:
		ret = -EOPNOTSUPP;
		break;
	}

	mutex_unlock(&pdata->lock);

	return ret;
}
444
445static umode_t power_hwmon_attrs_visible(const void *drvdata,
446					 enum hwmon_sensor_types type,
447					 u32 attr, int channel)
448{
449	switch (attr) {
450	case hwmon_power_input:
451	case hwmon_power_max_alarm:
452	case hwmon_power_crit_alarm:
453		return 0444;
454	case hwmon_power_max:
455	case hwmon_power_crit:
456		return 0644;
457	}
458
459	return 0;
460}
461
/* Power sensor supports both read (all attrs) and write (thresholds). */
static const struct hwmon_ops power_hwmon_ops = {
	.is_visible = power_hwmon_attrs_visible,
	.read = power_hwmon_read,
	.write = power_hwmon_write,
};

/* One power channel with threshold and alarm attributes. */
static const struct hwmon_channel_info *power_hwmon_info[] = {
	HWMON_CHANNEL_INFO(power, HWMON_P_INPUT |
				  HWMON_P_MAX   | HWMON_P_MAX_ALARM |
				  HWMON_P_CRIT  | HWMON_P_CRIT_ALARM),
	NULL
};

static const struct hwmon_chip_info power_hwmon_chip_info = {
	.ops = &power_hwmon_ops,
	.info = power_hwmon_info,
};
479
480static ssize_t power1_xeon_limit_show(struct device *dev,
481				      struct device_attribute *attr, char *buf)
482{
483	struct dfl_feature *feature = dev_get_drvdata(dev);
484	u16 xeon_limit = 0;
485	u64 v;
486
487	v = readq(feature->ioaddr + FME_PWR_XEON_LIMIT);
488
489	if (FIELD_GET(XEON_PWR_EN, v))
490		xeon_limit = FIELD_GET(XEON_PWR_LIMIT, v);
491
492	return sprintf(buf, "%u\n", xeon_limit * 100000);
493}
494
495static ssize_t power1_fpga_limit_show(struct device *dev,
496				      struct device_attribute *attr, char *buf)
497{
498	struct dfl_feature *feature = dev_get_drvdata(dev);
499	u16 fpga_limit = 0;
500	u64 v;
501
502	v = readq(feature->ioaddr + FME_PWR_FPGA_LIMIT);
503
504	if (FIELD_GET(FPGA_PWR_EN, v))
505		fpga_limit = FIELD_GET(FPGA_PWR_LIMIT, v);
506
507	return sprintf(buf, "%u\n", fpga_limit * 100000);
508}
509
510static ssize_t power1_ltr_show(struct device *dev,
511			       struct device_attribute *attr, char *buf)
512{
513	struct dfl_feature *feature = dev_get_drvdata(dev);
514	u64 v;
515
516	v = readq(feature->ioaddr + FME_PWR_STATUS);
517
518	return sprintf(buf, "%u\n",
519		       (unsigned int)FIELD_GET(FME_LATENCY_TOLERANCE, v));
520}
521
522static DEVICE_ATTR_RO(power1_xeon_limit);
523static DEVICE_ATTR_RO(power1_fpga_limit);
524static DEVICE_ATTR_RO(power1_ltr);
525
/* Extra (non-standard hwmon) power sysfs attributes. */
static struct attribute *power_extra_attrs[] = {
	&dev_attr_power1_xeon_limit.attr,
	&dev_attr_power1_fpga_limit.attr,
	&dev_attr_power1_ltr.attr,
	NULL
};

ATTRIBUTE_GROUPS(power_extra);
534
/*
 * fme_power_mgmt_init - register the power hwmon device for the FME.
 * @pdev:    FME platform device
 * @feature: power management sub-feature
 *
 * Exposes power1_input/max/crit plus alarms through hwmon, and the
 * device-specific xeon/fpga limit and latency tolerance attributes
 * via the extra attribute group.
 */
static int fme_power_mgmt_init(struct platform_device *pdev,
			       struct dfl_feature *feature)
{
	struct device *hwmon;

	hwmon = devm_hwmon_device_register_with_info(&pdev->dev,
						     "dfl_fme_power", feature,
						     &power_hwmon_chip_info,
						     power_extra_groups);
	if (IS_ERR(hwmon)) {
		dev_err(&pdev->dev, "Fail to register power hwmon\n");
		return PTR_ERR(hwmon);
	}

	return 0;
}
551
/* Feature-id match table: power management sub-feature. */
static const struct dfl_feature_id fme_power_mgmt_id_table[] = {
	{.id = FME_FEATURE_ID_POWER_MGMT,},
	{0,}
};

/* Power sub-feature only needs init (hwmon registration is devm-managed). */
static const struct dfl_feature_ops fme_power_mgmt_ops = {
	.init = fme_power_mgmt_init,
};
560
/*
 * All sub-feature drivers attached to the FME device; walked by
 * dfl_fpga_dev_feature_init().  The array is terminated by a NULL .ops.
 */
static struct dfl_feature_driver fme_feature_drvs[] = {
	{
		.id_table = fme_hdr_id_table,
		.ops = &fme_hdr_ops,
	},
	{
		.id_table = fme_pr_mgmt_id_table,
		.ops = &fme_pr_mgmt_ops,
	},
	{
		.id_table = fme_global_err_id_table,
		.ops = &fme_global_err_ops,
	},
	{
		.id_table = fme_thermal_mgmt_id_table,
		.ops = &fme_thermal_mgmt_ops,
	},
	{
		.id_table = fme_power_mgmt_id_table,
		.ops = &fme_power_mgmt_ops,
	},
	{
		.id_table = fme_perf_id_table,
		.ops = &fme_perf_ops,
	},
	{
		.ops = NULL,
	},
};
590
/* DFL_FPGA_CHECK_EXTENSION handler: no extensions are defined yet. */
static long fme_ioctl_check_extension(struct dfl_feature_platform_data *pdata,
				      unsigned long arg)
{
	/* No extension support for now */
	return 0;
}
597
/*
 * fme_open - open() for the FME character device.
 *
 * Takes a device usage reference under pdata->lock; an O_EXCL open
 * requests exclusive access.  On success the platform data pointer is
 * cached in filp->private_data for the other file operations.
 */
static int fme_open(struct inode *inode, struct file *filp)
{
	struct platform_device *fdev = dfl_fpga_inode_to_feature_dev(inode);
	struct dfl_feature_platform_data *pdata = dev_get_platdata(&fdev->dev);
	int ret;

	if (WARN_ON(!pdata))
		return -ENODEV;

	mutex_lock(&pdata->lock);
	ret = dfl_feature_dev_use_begin(pdata, filp->f_flags & O_EXCL);
	if (!ret) {
		dev_dbg(&fdev->dev, "Device File Opened %d Times\n",
			dfl_feature_dev_use_count(pdata));
		filp->private_data = pdata;
	}
	mutex_unlock(&pdata->lock);

	return ret;
}
618
/*
 * fme_release - release() for the FME character device.
 *
 * Drops the usage reference taken in fme_open(); when the last user goes
 * away, interrupt triggers of all sub-features are cleared so no stale
 * notifications remain armed.
 */
static int fme_release(struct inode *inode, struct file *filp)
{
	struct dfl_feature_platform_data *pdata = filp->private_data;
	struct platform_device *pdev = pdata->dev;
	struct dfl_feature *feature;

	dev_dbg(&pdev->dev, "Device File Release\n");

	mutex_lock(&pdata->lock);
	dfl_feature_dev_use_end(pdata);

	if (!dfl_feature_dev_use_count(pdata))
		dfl_fpga_dev_for_each_feature(pdata, feature)
			dfl_fpga_set_irq_triggers(feature, 0,
						  feature->nr_irqs, NULL);
	mutex_unlock(&pdata->lock);

	return 0;
}
638
/*
 * fme_ioctl - top-level ioctl dispatcher for the FME device.
 *
 * Handles the generic DFL commands directly and forwards everything else
 * to each sub-feature in turn; -ENODEV from a sub-feature means "not
 * mine", any other return value terminates the search.
 */
static long fme_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
	struct dfl_feature_platform_data *pdata = filp->private_data;
	struct platform_device *pdev = pdata->dev;
	struct dfl_feature *f;
	long ret;

	dev_dbg(&pdev->dev, "%s cmd 0x%x\n", __func__, cmd);

	switch (cmd) {
	case DFL_FPGA_GET_API_VERSION:
		return DFL_FPGA_API_VERSION;
	case DFL_FPGA_CHECK_EXTENSION:
		return fme_ioctl_check_extension(pdata, arg);
	default:
		/*
		 * Let sub-feature's ioctl function to handle the cmd.
		 * Sub-feature's ioctl returns -ENODEV when cmd is not
		 * handled in this sub feature, and returns 0 or other
		 * error code if cmd is handled.
		 */
		dfl_fpga_dev_for_each_feature(pdata, f) {
			if (f->ops && f->ops->ioctl) {
				ret = f->ops->ioctl(pdev, f, cmd, arg);
				if (ret != -ENODEV)
					return ret;
			}
		}
	}

	return -EINVAL;
}
671
672static int fme_dev_init(struct platform_device *pdev)
673{
674	struct dfl_feature_platform_data *pdata = dev_get_platdata(&pdev->dev);
675	struct dfl_fme *fme;
676
677	fme = devm_kzalloc(&pdev->dev, sizeof(*fme), GFP_KERNEL);
678	if (!fme)
679		return -ENOMEM;
680
681	fme->pdata = pdata;
682
683	mutex_lock(&pdata->lock);
684	dfl_fpga_pdata_set_private(pdata, fme);
685	mutex_unlock(&pdata->lock);
686
687	return 0;
688}
689
/* Detach the private struct; the devm allocation is freed automatically. */
static void fme_dev_destroy(struct platform_device *pdev)
{
	struct dfl_feature_platform_data *pdata = dev_get_platdata(&pdev->dev);

	mutex_lock(&pdata->lock);
	dfl_fpga_pdata_set_private(pdata, NULL);
	mutex_unlock(&pdata->lock);
}
698
/* File operations for the FME character device. */
static const struct file_operations fme_fops = {
	.owner		= THIS_MODULE,
	.open		= fme_open,
	.release	= fme_release,
	.unlocked_ioctl = fme_ioctl,
};
705
/*
 * fme_probe - platform driver probe for the FME device.
 *
 * Initialization order: private data, sub-features, then the user-facing
 * char device ops; the goto chain unwinds in exact reverse on failure.
 */
static int fme_probe(struct platform_device *pdev)
{
	int ret;

	ret = fme_dev_init(pdev);
	if (ret)
		goto exit;

	ret = dfl_fpga_dev_feature_init(pdev, fme_feature_drvs);
	if (ret)
		goto dev_destroy;

	ret = dfl_fpga_dev_ops_register(pdev, &fme_fops, THIS_MODULE);
	if (ret)
		goto feature_uinit;

	return 0;

feature_uinit:
	dfl_fpga_dev_feature_uinit(pdev);
dev_destroy:
	fme_dev_destroy(pdev);
exit:
	return ret;
}
731
/* fme_remove - tear down in exact reverse probe order. */
static int fme_remove(struct platform_device *pdev)
{
	dfl_fpga_dev_ops_unregister(pdev);
	dfl_fpga_dev_feature_uinit(pdev);
	fme_dev_destroy(pdev);

	return 0;
}
740
/* sysfs attribute groups attached at FME device level. */
static const struct attribute_group *fme_dev_groups[] = {
	&fme_hdr_group,
	&fme_global_err_group,
	NULL
};

static struct platform_driver fme_driver = {
	.driver	= {
		.name       = DFL_FPGA_FEATURE_DEV_FME,
		.dev_groups = fme_dev_groups,
	},
	.probe   = fme_probe,
	.remove  = fme_remove,
};

module_platform_driver(fme_driver);

MODULE_DESCRIPTION("FPGA Management Engine driver");
MODULE_AUTHOR("Intel Corporation");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS("platform:dfl-fme");
/* ===== A second copy of the same file follows: kernel v6.8 version ===== */
  1// SPDX-License-Identifier: GPL-2.0
  2/*
  3 * Driver for FPGA Management Engine (FME)
  4 *
  5 * Copyright (C) 2017-2018 Intel Corporation, Inc.
  6 *
  7 * Authors:
  8 *   Kang Luwei <luwei.kang@intel.com>
  9 *   Xiao Guangrong <guangrong.xiao@linux.intel.com>
 10 *   Joseph Grecco <joe.grecco@intel.com>
 11 *   Enno Luebbers <enno.luebbers@intel.com>
 12 *   Tim Whisonant <tim.whisonant@intel.com>
 13 *   Ananda Ravuri <ananda.ravuri@intel.com>
 14 *   Henry Mitchel <henry.mitchel@intel.com>
 15 */
 16
 17#include <linux/hwmon.h>
 18#include <linux/hwmon-sysfs.h>
 19#include <linux/kernel.h>
 20#include <linux/module.h>
 21#include <linux/uaccess.h>
 22#include <linux/units.h>
 23#include <linux/fpga-dfl.h>
 24
 25#include "dfl.h"
 26#include "dfl-fme.h"
 27
 28static ssize_t ports_num_show(struct device *dev,
 29			      struct device_attribute *attr, char *buf)
 30{
 31	void __iomem *base;
 32	u64 v;
 33
 34	base = dfl_get_feature_ioaddr_by_id(dev, FME_FEATURE_ID_HEADER);
 35
 36	v = readq(base + FME_HDR_CAP);
 37
 38	return scnprintf(buf, PAGE_SIZE, "%u\n",
 39			 (unsigned int)FIELD_GET(FME_CAP_NUM_PORTS, v));
 40}
 41static DEVICE_ATTR_RO(ports_num);
 42
 43/*
 44 * Bitstream (static FPGA region) identifier number. It contains the
 45 * detailed version and other information of this static FPGA region.
 46 */
 47static ssize_t bitstream_id_show(struct device *dev,
 48				 struct device_attribute *attr, char *buf)
 49{
 50	void __iomem *base;
 51	u64 v;
 52
 53	base = dfl_get_feature_ioaddr_by_id(dev, FME_FEATURE_ID_HEADER);
 54
 55	v = readq(base + FME_HDR_BITSTREAM_ID);
 56
 57	return scnprintf(buf, PAGE_SIZE, "0x%llx\n", (unsigned long long)v);
 58}
 59static DEVICE_ATTR_RO(bitstream_id);
 60
 61/*
 62 * Bitstream (static FPGA region) meta data. It contains the synthesis
 63 * date, seed and other information of this static FPGA region.
 64 */
 65static ssize_t bitstream_metadata_show(struct device *dev,
 66				       struct device_attribute *attr, char *buf)
 67{
 68	void __iomem *base;
 69	u64 v;
 70
 71	base = dfl_get_feature_ioaddr_by_id(dev, FME_FEATURE_ID_HEADER);
 72
 73	v = readq(base + FME_HDR_BITSTREAM_MD);
 74
 75	return scnprintf(buf, PAGE_SIZE, "0x%llx\n", (unsigned long long)v);
 76}
 77static DEVICE_ATTR_RO(bitstream_metadata);
 78
 79static ssize_t cache_size_show(struct device *dev,
 80			       struct device_attribute *attr, char *buf)
 81{
 82	void __iomem *base;
 83	u64 v;
 84
 85	base = dfl_get_feature_ioaddr_by_id(dev, FME_FEATURE_ID_HEADER);
 86
 87	v = readq(base + FME_HDR_CAP);
 88
 89	return sprintf(buf, "%u\n",
 90		       (unsigned int)FIELD_GET(FME_CAP_CACHE_SIZE, v));
 91}
 92static DEVICE_ATTR_RO(cache_size);
 93
 94static ssize_t fabric_version_show(struct device *dev,
 95				   struct device_attribute *attr, char *buf)
 96{
 97	void __iomem *base;
 98	u64 v;
 99
100	base = dfl_get_feature_ioaddr_by_id(dev, FME_FEATURE_ID_HEADER);
101
102	v = readq(base + FME_HDR_CAP);
103
104	return sprintf(buf, "%u\n",
105		       (unsigned int)FIELD_GET(FME_CAP_FABRIC_VERID, v));
106}
107static DEVICE_ATTR_RO(fabric_version);
108
109static ssize_t socket_id_show(struct device *dev,
110			      struct device_attribute *attr, char *buf)
111{
112	void __iomem *base;
113	u64 v;
114
115	base = dfl_get_feature_ioaddr_by_id(dev, FME_FEATURE_ID_HEADER);
116
117	v = readq(base + FME_HDR_CAP);
118
119	return sprintf(buf, "%u\n",
120		       (unsigned int)FIELD_GET(FME_CAP_SOCKET_ID, v));
121}
122static DEVICE_ATTR_RO(socket_id);
123
124static struct attribute *fme_hdr_attrs[] = {
125	&dev_attr_ports_num.attr,
126	&dev_attr_bitstream_id.attr,
127	&dev_attr_bitstream_metadata.attr,
128	&dev_attr_cache_size.attr,
129	&dev_attr_fabric_version.attr,
130	&dev_attr_socket_id.attr,
131	NULL,
132};
133
134static const struct attribute_group fme_hdr_group = {
135	.attrs = fme_hdr_attrs,
136};
137
138static long fme_hdr_ioctl_release_port(struct dfl_feature_platform_data *pdata,
139				       unsigned long arg)
140{
141	struct dfl_fpga_cdev *cdev = pdata->dfl_cdev;
142	int port_id;
143
144	if (get_user(port_id, (int __user *)arg))
145		return -EFAULT;
146
147	return dfl_fpga_cdev_release_port(cdev, port_id);
148}
149
150static long fme_hdr_ioctl_assign_port(struct dfl_feature_platform_data *pdata,
151				      unsigned long arg)
152{
153	struct dfl_fpga_cdev *cdev = pdata->dfl_cdev;
154	int port_id;
155
156	if (get_user(port_id, (int __user *)arg))
157		return -EFAULT;
158
159	return dfl_fpga_cdev_assign_port(cdev, port_id);
160}
161
162static long fme_hdr_ioctl(struct platform_device *pdev,
163			  struct dfl_feature *feature,
164			  unsigned int cmd, unsigned long arg)
165{
166	struct dfl_feature_platform_data *pdata = dev_get_platdata(&pdev->dev);
167
168	switch (cmd) {
169	case DFL_FPGA_FME_PORT_RELEASE:
170		return fme_hdr_ioctl_release_port(pdata, arg);
171	case DFL_FPGA_FME_PORT_ASSIGN:
172		return fme_hdr_ioctl_assign_port(pdata, arg);
173	}
174
175	return -ENODEV;
176}
177
178static const struct dfl_feature_id fme_hdr_id_table[] = {
179	{.id = FME_FEATURE_ID_HEADER,},
180	{0,}
181};
182
183static const struct dfl_feature_ops fme_hdr_ops = {
184	.ioctl = fme_hdr_ioctl,
185};
186
187#define FME_THERM_THRESHOLD	0x8
188#define TEMP_THRESHOLD1		GENMASK_ULL(6, 0)
189#define TEMP_THRESHOLD1_EN	BIT_ULL(7)
190#define TEMP_THRESHOLD2		GENMASK_ULL(14, 8)
191#define TEMP_THRESHOLD2_EN	BIT_ULL(15)
192#define TRIP_THRESHOLD		GENMASK_ULL(30, 24)
193#define TEMP_THRESHOLD1_STATUS	BIT_ULL(32)		/* threshold1 reached */
194#define TEMP_THRESHOLD2_STATUS	BIT_ULL(33)		/* threshold2 reached */
195/* threshold1 policy: 0 - AP2 (90% throttle) / 1 - AP1 (50% throttle) */
196#define TEMP_THRESHOLD1_POLICY	BIT_ULL(44)
197
198#define FME_THERM_RDSENSOR_FMT1	0x10
199#define FPGA_TEMPERATURE	GENMASK_ULL(6, 0)
200
201#define FME_THERM_CAP		0x20
202#define THERM_NO_THROTTLE	BIT_ULL(0)
203
204#define MD_PRE_DEG
205
206static bool fme_thermal_throttle_support(void __iomem *base)
207{
208	u64 v = readq(base + FME_THERM_CAP);
209
210	return FIELD_GET(THERM_NO_THROTTLE, v) ? false : true;
211}
212
213static umode_t thermal_hwmon_attrs_visible(const void *drvdata,
214					   enum hwmon_sensor_types type,
215					   u32 attr, int channel)
216{
217	const struct dfl_feature *feature = drvdata;
218
219	/* temperature is always supported, and check hardware cap for others */
220	if (attr == hwmon_temp_input)
221		return 0444;
222
223	return fme_thermal_throttle_support(feature->ioaddr) ? 0444 : 0;
224}
225
/*
 * thermal_hwmon_read - hwmon read callback for the FME thermal sensor.
 *
 * Hardware reports whole degrees Celsius; hwmon expects millidegrees,
 * hence the MILLI (1000) scaling from <linux/units.h>.  Alarm attributes
 * return raw status bits.
 */
static int thermal_hwmon_read(struct device *dev, enum hwmon_sensor_types type,
			      u32 attr, int channel, long *val)
{
	struct dfl_feature *feature = dev_get_drvdata(dev);
	u64 v;

	switch (attr) {
	case hwmon_temp_input:
		v = readq(feature->ioaddr + FME_THERM_RDSENSOR_FMT1);
		*val = (long)(FIELD_GET(FPGA_TEMPERATURE, v) * MILLI);
		break;
	case hwmon_temp_max:
		/* threshold1: triggers 50% or 90% throttling (see policy) */
		v = readq(feature->ioaddr + FME_THERM_THRESHOLD);
		*val = (long)(FIELD_GET(TEMP_THRESHOLD1, v) * MILLI);
		break;
	case hwmon_temp_crit:
		/* threshold2: triggers 100% throttling */
		v = readq(feature->ioaddr + FME_THERM_THRESHOLD);
		*val = (long)(FIELD_GET(TEMP_THRESHOLD2, v) * MILLI);
		break;
	case hwmon_temp_emergency:
		/* trip threshold: hardware shuts the FPGA down */
		v = readq(feature->ioaddr + FME_THERM_THRESHOLD);
		*val = (long)(FIELD_GET(TRIP_THRESHOLD, v) * MILLI);
		break;
	case hwmon_temp_max_alarm:
		v = readq(feature->ioaddr + FME_THERM_THRESHOLD);
		*val = (long)FIELD_GET(TEMP_THRESHOLD1_STATUS, v);
		break;
	case hwmon_temp_crit_alarm:
		v = readq(feature->ioaddr + FME_THERM_THRESHOLD);
		*val = (long)FIELD_GET(TEMP_THRESHOLD2_STATUS, v);
		break;
	default:
		return -EOPNOTSUPP;
	}

	return 0;
}
263
/* Read-only sensor: no .write callback. */
static const struct hwmon_ops thermal_hwmon_ops = {
	.is_visible = thermal_hwmon_attrs_visible,
	.read = thermal_hwmon_read,
};

/* One temperature channel with threshold and alarm attributes. */
static const struct hwmon_channel_info * const thermal_hwmon_info[] = {
	HWMON_CHANNEL_INFO(temp, HWMON_T_INPUT | HWMON_T_EMERGENCY |
				 HWMON_T_MAX   | HWMON_T_MAX_ALARM |
				 HWMON_T_CRIT  | HWMON_T_CRIT_ALARM),
	NULL
};

static const struct hwmon_chip_info thermal_hwmon_chip_info = {
	.ops = &thermal_hwmon_ops,
	.info = thermal_hwmon_info,
};
280
281static ssize_t temp1_max_policy_show(struct device *dev,
282				     struct device_attribute *attr, char *buf)
283{
284	struct dfl_feature *feature = dev_get_drvdata(dev);
285	u64 v;
286
287	v = readq(feature->ioaddr + FME_THERM_THRESHOLD);
288
289	return sprintf(buf, "%u\n",
290		       (unsigned int)FIELD_GET(TEMP_THRESHOLD1_POLICY, v));
291}
292
293static DEVICE_ATTR_RO(temp1_max_policy);
294
295static struct attribute *thermal_extra_attrs[] = {
296	&dev_attr_temp1_max_policy.attr,
297	NULL,
298};
299
300static umode_t thermal_extra_attrs_visible(struct kobject *kobj,
301					   struct attribute *attr, int index)
302{
303	struct device *dev = kobj_to_dev(kobj);
304	struct dfl_feature *feature = dev_get_drvdata(dev);
305
306	return fme_thermal_throttle_support(feature->ioaddr) ? attr->mode : 0;
307}
308
309static const struct attribute_group thermal_extra_group = {
310	.attrs		= thermal_extra_attrs,
311	.is_visible	= thermal_extra_attrs_visible,
312};
313__ATTRIBUTE_GROUPS(thermal_extra);
314
315static int fme_thermal_mgmt_init(struct platform_device *pdev,
316				 struct dfl_feature *feature)
317{
318	struct device *hwmon;
319
320	/*
321	 * create hwmon to allow userspace monitoring temperature and other
322	 * threshold information.
323	 *
324	 * temp1_input      -> FPGA device temperature
325	 * temp1_max        -> hardware threshold 1 -> 50% or 90% throttling
326	 * temp1_crit       -> hardware threshold 2 -> 100% throttling
327	 * temp1_emergency  -> hardware trip_threshold to shutdown FPGA
328	 * temp1_max_alarm  -> hardware threshold 1 alarm
329	 * temp1_crit_alarm -> hardware threshold 2 alarm
330	 *
331	 * create device specific sysfs interfaces, e.g. read temp1_max_policy
332	 * to understand the actual hardware throttling action (50% vs 90%).
333	 *
334	 * If hardware doesn't support automatic throttling per thresholds,
335	 * then all above sysfs interfaces are not visible except temp1_input
336	 * for temperature.
337	 */
338	hwmon = devm_hwmon_device_register_with_info(&pdev->dev,
339						     "dfl_fme_thermal", feature,
340						     &thermal_hwmon_chip_info,
341						     thermal_extra_groups);
342	if (IS_ERR(hwmon)) {
343		dev_err(&pdev->dev, "Fail to register thermal hwmon\n");
344		return PTR_ERR(hwmon);
345	}
346
347	return 0;
348}
349
/* DFL feature ID matched by the thermal management sub-driver. */
static const struct dfl_feature_id fme_thermal_mgmt_id_table[] = {
	{.id = FME_FEATURE_ID_THERMAL_MGMT,},
	{0,}
};

static const struct dfl_feature_ops fme_thermal_mgmt_ops = {
	.init = fme_thermal_mgmt_init,
};
358
/* FME power management register offsets and field layouts. */
#define FME_PWR_STATUS		0x8
#define FME_LATENCY_TOLERANCE	BIT_ULL(18)
#define PWR_CONSUMED		GENMASK_ULL(17, 0)

#define FME_PWR_THRESHOLD	0x10
#define PWR_THRESHOLD1		GENMASK_ULL(6, 0)	/* in Watts */
#define PWR_THRESHOLD2		GENMASK_ULL(14, 8)	/* in Watts */
#define PWR_THRESHOLD_MAX	0x7f			/* in Watts */
#define PWR_THRESHOLD1_STATUS	BIT_ULL(16)
#define PWR_THRESHOLD2_STATUS	BIT_ULL(17)

#define FME_PWR_XEON_LIMIT	0x18
#define XEON_PWR_LIMIT		GENMASK_ULL(14, 0)	/* in 0.1 Watts */
#define XEON_PWR_EN		BIT_ULL(15)
#define FME_PWR_FPGA_LIMIT	0x20
#define FPGA_PWR_LIMIT		GENMASK_ULL(14, 0)	/* in 0.1 Watts */
#define FPGA_PWR_EN		BIT_ULL(15)
376
377static int power_hwmon_read(struct device *dev, enum hwmon_sensor_types type,
378			    u32 attr, int channel, long *val)
379{
380	struct dfl_feature *feature = dev_get_drvdata(dev);
381	u64 v;
382
383	switch (attr) {
384	case hwmon_power_input:
385		v = readq(feature->ioaddr + FME_PWR_STATUS);
386		*val = (long)(FIELD_GET(PWR_CONSUMED, v) * MICRO);
387		break;
388	case hwmon_power_max:
389		v = readq(feature->ioaddr + FME_PWR_THRESHOLD);
390		*val = (long)(FIELD_GET(PWR_THRESHOLD1, v) * MICRO);
391		break;
392	case hwmon_power_crit:
393		v = readq(feature->ioaddr + FME_PWR_THRESHOLD);
394		*val = (long)(FIELD_GET(PWR_THRESHOLD2, v) * MICRO);
395		break;
396	case hwmon_power_max_alarm:
397		v = readq(feature->ioaddr + FME_PWR_THRESHOLD);
398		*val = (long)FIELD_GET(PWR_THRESHOLD1_STATUS, v);
399		break;
400	case hwmon_power_crit_alarm:
401		v = readq(feature->ioaddr + FME_PWR_THRESHOLD);
402		*val = (long)FIELD_GET(PWR_THRESHOLD2_STATUS, v);
403		break;
404	default:
405		return -EOPNOTSUPP;
406	}
407
408	return 0;
409}
410
411static int power_hwmon_write(struct device *dev, enum hwmon_sensor_types type,
412			     u32 attr, int channel, long val)
413{
414	struct dfl_feature_platform_data *pdata = dev_get_platdata(dev->parent);
415	struct dfl_feature *feature = dev_get_drvdata(dev);
416	int ret = 0;
417	u64 v;
418
419	val = clamp_val(val / MICRO, 0, PWR_THRESHOLD_MAX);
420
421	mutex_lock(&pdata->lock);
422
423	switch (attr) {
424	case hwmon_power_max:
425		v = readq(feature->ioaddr + FME_PWR_THRESHOLD);
426		v &= ~PWR_THRESHOLD1;
427		v |= FIELD_PREP(PWR_THRESHOLD1, val);
428		writeq(v, feature->ioaddr + FME_PWR_THRESHOLD);
429		break;
430	case hwmon_power_crit:
431		v = readq(feature->ioaddr + FME_PWR_THRESHOLD);
432		v &= ~PWR_THRESHOLD2;
433		v |= FIELD_PREP(PWR_THRESHOLD2, val);
434		writeq(v, feature->ioaddr + FME_PWR_THRESHOLD);
435		break;
436	default:
437		ret = -EOPNOTSUPP;
438		break;
439	}
440
441	mutex_unlock(&pdata->lock);
442
443	return ret;
444}
445
446static umode_t power_hwmon_attrs_visible(const void *drvdata,
447					 enum hwmon_sensor_types type,
448					 u32 attr, int channel)
449{
450	switch (attr) {
451	case hwmon_power_input:
452	case hwmon_power_max_alarm:
453	case hwmon_power_crit_alarm:
454		return 0444;
455	case hwmon_power_max:
456	case hwmon_power_crit:
457		return 0644;
458	}
459
460	return 0;
461}
462
static const struct hwmon_ops power_hwmon_ops = {
	.is_visible = power_hwmon_attrs_visible,
	.read = power_hwmon_read,
	.write = power_hwmon_write,
};

/* One power channel: input, max/crit thresholds and their alarms. */
static const struct hwmon_channel_info * const power_hwmon_info[] = {
	HWMON_CHANNEL_INFO(power, HWMON_P_INPUT |
				  HWMON_P_MAX   | HWMON_P_MAX_ALARM |
				  HWMON_P_CRIT  | HWMON_P_CRIT_ALARM),
	NULL
};

static const struct hwmon_chip_info power_hwmon_chip_info = {
	.ops = &power_hwmon_ops,
	.info = power_hwmon_info,
};
480
481static ssize_t power1_xeon_limit_show(struct device *dev,
482				      struct device_attribute *attr, char *buf)
483{
484	struct dfl_feature *feature = dev_get_drvdata(dev);
485	u16 xeon_limit = 0;
486	u64 v;
487
488	v = readq(feature->ioaddr + FME_PWR_XEON_LIMIT);
489
490	if (FIELD_GET(XEON_PWR_EN, v))
491		xeon_limit = FIELD_GET(XEON_PWR_LIMIT, v);
492
493	return sprintf(buf, "%u\n", xeon_limit * 100000);
494}
495
496static ssize_t power1_fpga_limit_show(struct device *dev,
497				      struct device_attribute *attr, char *buf)
498{
499	struct dfl_feature *feature = dev_get_drvdata(dev);
500	u16 fpga_limit = 0;
501	u64 v;
502
503	v = readq(feature->ioaddr + FME_PWR_FPGA_LIMIT);
504
505	if (FIELD_GET(FPGA_PWR_EN, v))
506		fpga_limit = FIELD_GET(FPGA_PWR_LIMIT, v);
507
508	return sprintf(buf, "%u\n", fpga_limit * 100000);
509}
510
511static ssize_t power1_ltr_show(struct device *dev,
512			       struct device_attribute *attr, char *buf)
513{
514	struct dfl_feature *feature = dev_get_drvdata(dev);
515	u64 v;
516
517	v = readq(feature->ioaddr + FME_PWR_STATUS);
518
519	return sprintf(buf, "%u\n",
520		       (unsigned int)FIELD_GET(FME_LATENCY_TOLERANCE, v));
521}
522
static DEVICE_ATTR_RO(power1_xeon_limit);
static DEVICE_ATTR_RO(power1_fpga_limit);
static DEVICE_ATTR_RO(power1_ltr);

/* Device specific power attributes beyond the standard hwmon set. */
static struct attribute *power_extra_attrs[] = {
	&dev_attr_power1_xeon_limit.attr,
	&dev_attr_power1_fpga_limit.attr,
	&dev_attr_power1_ltr.attr,
	NULL
};

ATTRIBUTE_GROUPS(power_extra);
535
536static int fme_power_mgmt_init(struct platform_device *pdev,
537			       struct dfl_feature *feature)
538{
539	struct device *hwmon;
540
541	hwmon = devm_hwmon_device_register_with_info(&pdev->dev,
542						     "dfl_fme_power", feature,
543						     &power_hwmon_chip_info,
544						     power_extra_groups);
545	if (IS_ERR(hwmon)) {
546		dev_err(&pdev->dev, "Fail to register power hwmon\n");
547		return PTR_ERR(hwmon);
548	}
549
550	return 0;
551}
552
/* DFL feature ID matched by the power management sub-driver. */
static const struct dfl_feature_id fme_power_mgmt_id_table[] = {
	{.id = FME_FEATURE_ID_POWER_MGMT,},
	{0,}
};

static const struct dfl_feature_ops fme_power_mgmt_ops = {
	.init = fme_power_mgmt_init,
};
561
/*
 * Sub-feature drivers for the FME device; the array is terminated by an
 * entry with a NULL .ops.
 */
static struct dfl_feature_driver fme_feature_drvs[] = {
	{
		.id_table = fme_hdr_id_table,
		.ops = &fme_hdr_ops,
	},
	{
		.id_table = fme_pr_mgmt_id_table,
		.ops = &fme_pr_mgmt_ops,
	},
	{
		.id_table = fme_global_err_id_table,
		.ops = &fme_global_err_ops,
	},
	{
		.id_table = fme_thermal_mgmt_id_table,
		.ops = &fme_thermal_mgmt_ops,
	},
	{
		.id_table = fme_power_mgmt_id_table,
		.ops = &fme_power_mgmt_ops,
	},
	{
		.id_table = fme_perf_id_table,
		.ops = &fme_perf_ops,
	},
	{
		.ops = NULL,
	},
};
591
/* DFL_FPGA_CHECK_EXTENSION handler: no extensions are supported yet. */
static long fme_ioctl_check_extension(struct dfl_feature_platform_data *pdata,
				      unsigned long arg)
{
	/* No extension support for now */
	return 0;
}
598
599static int fme_open(struct inode *inode, struct file *filp)
600{
601	struct platform_device *fdev = dfl_fpga_inode_to_feature_dev(inode);
602	struct dfl_feature_platform_data *pdata = dev_get_platdata(&fdev->dev);
603	int ret;
604
605	if (WARN_ON(!pdata))
606		return -ENODEV;
607
608	mutex_lock(&pdata->lock);
609	ret = dfl_feature_dev_use_begin(pdata, filp->f_flags & O_EXCL);
610	if (!ret) {
611		dev_dbg(&fdev->dev, "Device File Opened %d Times\n",
612			dfl_feature_dev_use_count(pdata));
613		filp->private_data = pdata;
614	}
615	mutex_unlock(&pdata->lock);
616
617	return ret;
618}
619
620static int fme_release(struct inode *inode, struct file *filp)
621{
622	struct dfl_feature_platform_data *pdata = filp->private_data;
623	struct platform_device *pdev = pdata->dev;
624	struct dfl_feature *feature;
625
626	dev_dbg(&pdev->dev, "Device File Release\n");
627
628	mutex_lock(&pdata->lock);
629	dfl_feature_dev_use_end(pdata);
630
631	if (!dfl_feature_dev_use_count(pdata))
632		dfl_fpga_dev_for_each_feature(pdata, feature)
633			dfl_fpga_set_irq_triggers(feature, 0,
634						  feature->nr_irqs, NULL);
635	mutex_unlock(&pdata->lock);
636
637	return 0;
638}
639
640static long fme_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
641{
642	struct dfl_feature_platform_data *pdata = filp->private_data;
643	struct platform_device *pdev = pdata->dev;
644	struct dfl_feature *f;
645	long ret;
646
647	dev_dbg(&pdev->dev, "%s cmd 0x%x\n", __func__, cmd);
648
649	switch (cmd) {
650	case DFL_FPGA_GET_API_VERSION:
651		return DFL_FPGA_API_VERSION;
652	case DFL_FPGA_CHECK_EXTENSION:
653		return fme_ioctl_check_extension(pdata, arg);
654	default:
655		/*
656		 * Let sub-feature's ioctl function to handle the cmd.
657		 * Sub-feature's ioctl returns -ENODEV when cmd is not
658		 * handled in this sub feature, and returns 0 or other
659		 * error code if cmd is handled.
660		 */
661		dfl_fpga_dev_for_each_feature(pdata, f) {
662			if (f->ops && f->ops->ioctl) {
663				ret = f->ops->ioctl(pdev, f, cmd, arg);
664				if (ret != -ENODEV)
665					return ret;
666			}
667		}
668	}
669
670	return -EINVAL;
671}
672
673static int fme_dev_init(struct platform_device *pdev)
674{
675	struct dfl_feature_platform_data *pdata = dev_get_platdata(&pdev->dev);
676	struct dfl_fme *fme;
677
678	fme = devm_kzalloc(&pdev->dev, sizeof(*fme), GFP_KERNEL);
679	if (!fme)
680		return -ENOMEM;
681
682	fme->pdata = pdata;
683
684	mutex_lock(&pdata->lock);
685	dfl_fpga_pdata_set_private(pdata, fme);
686	mutex_unlock(&pdata->lock);
687
688	return 0;
689}
690
/* Detach the FME private data from the feature platform data. */
static void fme_dev_destroy(struct platform_device *pdev)
{
	struct dfl_feature_platform_data *pdata = dev_get_platdata(&pdev->dev);

	mutex_lock(&pdata->lock);
	dfl_fpga_pdata_set_private(pdata, NULL);
	mutex_unlock(&pdata->lock);
}
699
/* File operations for the FME character device. */
static const struct file_operations fme_fops = {
	.owner		= THIS_MODULE,
	.open		= fme_open,
	.release	= fme_release,
	.unlocked_ioctl = fme_ioctl,
};
706
707static int fme_probe(struct platform_device *pdev)
708{
709	int ret;
710
711	ret = fme_dev_init(pdev);
712	if (ret)
713		goto exit;
714
715	ret = dfl_fpga_dev_feature_init(pdev, fme_feature_drvs);
716	if (ret)
717		goto dev_destroy;
718
719	ret = dfl_fpga_dev_ops_register(pdev, &fme_fops, THIS_MODULE);
720	if (ret)
721		goto feature_uinit;
722
723	return 0;
724
725feature_uinit:
726	dfl_fpga_dev_feature_uinit(pdev);
727dev_destroy:
728	fme_dev_destroy(pdev);
729exit:
730	return ret;
731}
732
/*
 * Remove: tear down in reverse order of probe. (Stray blank lines at the
 * end of the function body removed.)
 */
static void fme_remove(struct platform_device *pdev)
{
	dfl_fpga_dev_ops_unregister(pdev);
	dfl_fpga_dev_feature_uinit(pdev);
	fme_dev_destroy(pdev);
}
739
/* Sysfs attribute groups created on the FME feature device itself. */
static const struct attribute_group *fme_dev_groups[] = {
	&fme_hdr_group,
	&fme_global_err_group,
	NULL
};

static struct platform_driver fme_driver = {
	.driver	= {
		.name       = DFL_FPGA_FEATURE_DEV_FME,
		.dev_groups = fme_dev_groups,
	},
	.probe   = fme_probe,
	.remove_new = fme_remove,
};
754
module_platform_driver(fme_driver);

MODULE_DESCRIPTION("FPGA Management Engine driver");
MODULE_AUTHOR("Intel Corporation");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS("platform:dfl-fme");