Linux v6.8: drivers/fpga/dfl-fme-error.c
// SPDX-License-Identifier: GPL-2.0
/*
 * Driver for FPGA Management Engine Error Management
 *
 * Copyright 2019 Intel Corporation, Inc.
 *
 * Authors:
 *   Kang Luwei <luwei.kang@intel.com>
 *   Xiao Guangrong <guangrong.xiao@linux.intel.com>
 *   Wu Hao <hao.wu@intel.com>
 *   Joseph Grecco <joe.grecco@intel.com>
 *   Enno Luebbers <enno.luebbers@intel.com>
 *   Tim Whisonant <tim.whisonant@intel.com>
 *   Ananda Ravuri <ananda.ravuri@intel.com>
 *   Mitchel, Henry <henry.mitchel@intel.com>
 */

#include <linux/fpga-dfl.h>
#include <linux/uaccess.h>

#include "dfl.h"
#include "dfl-fme.h"

#define FME_ERROR_MASK		0x8
#define FME_ERROR		0x10
#define MBP_ERROR		BIT_ULL(6)
#define PCIE0_ERROR_MASK	0x18
#define PCIE0_ERROR		0x20
#define PCIE1_ERROR_MASK	0x28
#define PCIE1_ERROR		0x30
#define FME_FIRST_ERROR		0x38
#define FME_NEXT_ERROR		0x40
#define RAS_NONFAT_ERROR_MASK	0x48
#define RAS_NONFAT_ERROR	0x50
#define RAS_CATFAT_ERROR_MASK	0x58
#define RAS_CATFAT_ERROR	0x60
#define RAS_ERROR_INJECT	0x68
#define INJECT_ERROR_MASK	GENMASK_ULL(2, 0)

#define ERROR_MASK		GENMASK_ULL(63, 0)

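/*
 * The *_errors attributes below use a read/write-back clear protocol:
 * userspace writes back the exact value it last read. Each store handler
 * masks reporting, re-reads the status register, clears it only if the
 * value still matches (the write-back is assumed to be write-1-to-clear
 * in hardware), then restores the mask, so error bits latched after the
 * read are never cleared by accident.
 */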
static ssize_t pcie0_errors_show(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
	void __iomem *base;
	u64 value;

	base = dfl_get_feature_ioaddr_by_id(dev, FME_FEATURE_ID_GLOBAL_ERR);

	mutex_lock(&pdata->lock);
	value = readq(base + PCIE0_ERROR);
	mutex_unlock(&pdata->lock);

	return sprintf(buf, "0x%llx\n", (unsigned long long)value);
}

static ssize_t pcie0_errors_store(struct device *dev,
				  struct device_attribute *attr,
				  const char *buf, size_t count)
{
	struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
	void __iomem *base;
	int ret = 0;
	u64 v, val;

	if (kstrtou64(buf, 0, &val))
		return -EINVAL;

	base = dfl_get_feature_ioaddr_by_id(dev, FME_FEATURE_ID_GLOBAL_ERR);

	mutex_lock(&pdata->lock);
	writeq(GENMASK_ULL(63, 0), base + PCIE0_ERROR_MASK);

	v = readq(base + PCIE0_ERROR);
	if (val == v)
		writeq(v, base + PCIE0_ERROR);
	else
		ret = -EINVAL;

	writeq(0ULL, base + PCIE0_ERROR_MASK);
	mutex_unlock(&pdata->lock);
	return ret ? ret : count;
}
static DEVICE_ATTR_RW(pcie0_errors);

static ssize_t pcie1_errors_show(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
	void __iomem *base;
	u64 value;

	base = dfl_get_feature_ioaddr_by_id(dev, FME_FEATURE_ID_GLOBAL_ERR);

	mutex_lock(&pdata->lock);
	value = readq(base + PCIE1_ERROR);
	mutex_unlock(&pdata->lock);

	return sprintf(buf, "0x%llx\n", (unsigned long long)value);
}

static ssize_t pcie1_errors_store(struct device *dev,
				  struct device_attribute *attr,
				  const char *buf, size_t count)
{
	struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
	void __iomem *base;
	int ret = 0;
	u64 v, val;

	if (kstrtou64(buf, 0, &val))
		return -EINVAL;

	base = dfl_get_feature_ioaddr_by_id(dev, FME_FEATURE_ID_GLOBAL_ERR);

	mutex_lock(&pdata->lock);
	writeq(GENMASK_ULL(63, 0), base + PCIE1_ERROR_MASK);

	v = readq(base + PCIE1_ERROR);
	if (val == v)
		writeq(v, base + PCIE1_ERROR);
	else
		ret = -EINVAL;

	writeq(0ULL, base + PCIE1_ERROR_MASK);
	mutex_unlock(&pdata->lock);
	return ret ? ret : count;
}
static DEVICE_ATTR_RW(pcie1_errors);

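/*
 * RAS status registers: RAS_NONFAT_ERROR reports non-fatal (recoverable)
 * errors, and RAS_CATFAT_ERROR presumably covers catastrophic/fatal ones.
 * Both are exposed read-only; no sysfs clear operation is provided.
 */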
static ssize_t nonfatal_errors_show(struct device *dev,
				    struct device_attribute *attr, char *buf)
{
	void __iomem *base;

	base = dfl_get_feature_ioaddr_by_id(dev, FME_FEATURE_ID_GLOBAL_ERR);

	return sprintf(buf, "0x%llx\n",
		       (unsigned long long)readq(base + RAS_NONFAT_ERROR));
}
static DEVICE_ATTR_RO(nonfatal_errors);

static ssize_t catfatal_errors_show(struct device *dev,
				    struct device_attribute *attr, char *buf)
{
	void __iomem *base;

	base = dfl_get_feature_ioaddr_by_id(dev, FME_FEATURE_ID_GLOBAL_ERR);

	return sprintf(buf, "0x%llx\n",
		       (unsigned long long)readq(base + RAS_CATFAT_ERROR));
}
static DEVICE_ATTR_RO(catfatal_errors);

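/*
 * Error injection: only the low three bits (INJECT_ERROR_MASK) of
 * RAS_ERROR_INJECT are writable. The store handler below rejects any
 * value with bits outside that field and read-modify-writes the rest
 * of the register unchanged.
 */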
static ssize_t inject_errors_show(struct device *dev,
				  struct device_attribute *attr, char *buf)
{
	struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
	void __iomem *base;
	u64 v;

	base = dfl_get_feature_ioaddr_by_id(dev, FME_FEATURE_ID_GLOBAL_ERR);

	mutex_lock(&pdata->lock);
	v = readq(base + RAS_ERROR_INJECT);
	mutex_unlock(&pdata->lock);

	return sprintf(buf, "0x%llx\n",
		       (unsigned long long)FIELD_GET(INJECT_ERROR_MASK, v));
}

static ssize_t inject_errors_store(struct device *dev,
				   struct device_attribute *attr,
				   const char *buf, size_t count)
{
	struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
	void __iomem *base;
	u8 inject_error;
	u64 v;

	if (kstrtou8(buf, 0, &inject_error))
		return -EINVAL;

	if (inject_error & ~INJECT_ERROR_MASK)
		return -EINVAL;

	base = dfl_get_feature_ioaddr_by_id(dev, FME_FEATURE_ID_GLOBAL_ERR);

	mutex_lock(&pdata->lock);
	v = readq(base + RAS_ERROR_INJECT);
	v &= ~INJECT_ERROR_MASK;
	v |= FIELD_PREP(INJECT_ERROR_MASK, inject_error);
	writeq(v, base + RAS_ERROR_INJECT);
	mutex_unlock(&pdata->lock);

	return count;
}
static DEVICE_ATTR_RW(inject_errors);

static ssize_t fme_errors_show(struct device *dev,
			       struct device_attribute *attr, char *buf)
{
	struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
	void __iomem *base;
	u64 value;

	base = dfl_get_feature_ioaddr_by_id(dev, FME_FEATURE_ID_GLOBAL_ERR);

	mutex_lock(&pdata->lock);
	value = readq(base + FME_ERROR);
	mutex_unlock(&pdata->lock);

	return sprintf(buf, "0x%llx\n", (unsigned long long)value);
}

static ssize_t fme_errors_store(struct device *dev,
				struct device_attribute *attr,
				const char *buf, size_t count)
{
	struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
	void __iomem *base;
	u64 v, val;
	int ret = 0;

	if (kstrtou64(buf, 0, &val))
		return -EINVAL;

	base = dfl_get_feature_ioaddr_by_id(dev, FME_FEATURE_ID_GLOBAL_ERR);

	mutex_lock(&pdata->lock);
	writeq(GENMASK_ULL(63, 0), base + FME_ERROR_MASK);

	v = readq(base + FME_ERROR);
	if (val == v)
		writeq(v, base + FME_ERROR);
	else
		ret = -EINVAL;

	/* Workaround: disable MBP_ERROR if feature revision is 0 */
	writeq(dfl_feature_revision(base) ? 0ULL : MBP_ERROR,
	       base + FME_ERROR_MASK);
	mutex_unlock(&pdata->lock);
	return ret ? ret : count;
}
static DEVICE_ATTR_RW(fme_errors);

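/*
 * FME_FIRST_ERROR and FME_NEXT_ERROR appear to latch the first and the
 * following error event, which helps establish ordering when several
 * bits are set in FME_ERROR. Both are exposed read-only here.
 */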
static ssize_t first_error_show(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
	void __iomem *base;
	u64 value;

	base = dfl_get_feature_ioaddr_by_id(dev, FME_FEATURE_ID_GLOBAL_ERR);

	mutex_lock(&pdata->lock);
	value = readq(base + FME_FIRST_ERROR);
	mutex_unlock(&pdata->lock);

	return sprintf(buf, "0x%llx\n", (unsigned long long)value);
}
static DEVICE_ATTR_RO(first_error);

static ssize_t next_error_show(struct device *dev,
			       struct device_attribute *attr, char *buf)
{
	struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
	void __iomem *base;
	u64 value;

	base = dfl_get_feature_ioaddr_by_id(dev, FME_FEATURE_ID_GLOBAL_ERR);

	mutex_lock(&pdata->lock);
	value = readq(base + FME_NEXT_ERROR);
	mutex_unlock(&pdata->lock);

	return sprintf(buf, "0x%llx\n", (unsigned long long)value);
}
static DEVICE_ATTR_RO(next_error);

static struct attribute *fme_global_err_attrs[] = {
	&dev_attr_pcie0_errors.attr,
	&dev_attr_pcie1_errors.attr,
	&dev_attr_nonfatal_errors.attr,
	&dev_attr_catfatal_errors.attr,
	&dev_attr_inject_errors.attr,
	&dev_attr_fme_errors.attr,
	&dev_attr_first_error.attr,
	&dev_attr_next_error.attr,
	NULL,
};

static umode_t fme_global_err_attrs_visible(struct kobject *kobj,
					    struct attribute *attr, int n)
{
	struct device *dev = kobj_to_dev(kobj);

	/*
	 * sysfs entries are visible only if the related private feature
	 * is enumerated.
	 */
	if (!dfl_get_feature_by_id(dev, FME_FEATURE_ID_GLOBAL_ERR))
		return 0;

	return attr->mode;
}

const struct attribute_group fme_global_err_group = {
	.name       = "errors",
	.attrs      = fme_global_err_attrs,
	.is_visible = fme_global_err_attrs_visible,
};

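/* Mask (mask == true) or unmask every global error reporting source. */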
static void fme_err_mask(struct device *dev, bool mask)
{
	struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
	void __iomem *base;

	base = dfl_get_feature_ioaddr_by_id(dev, FME_FEATURE_ID_GLOBAL_ERR);

	mutex_lock(&pdata->lock);

	/* Workaround: keep MBP_ERROR always masked if revision is 0 */
	if (dfl_feature_revision(base))
		writeq(mask ? ERROR_MASK : 0, base + FME_ERROR_MASK);
	else
		writeq(mask ? ERROR_MASK : MBP_ERROR, base + FME_ERROR_MASK);

	writeq(mask ? ERROR_MASK : 0, base + PCIE0_ERROR_MASK);
	writeq(mask ? ERROR_MASK : 0, base + PCIE1_ERROR_MASK);
	writeq(mask ? ERROR_MASK : 0, base + RAS_NONFAT_ERROR_MASK);
	writeq(mask ? ERROR_MASK : 0, base + RAS_CATFAT_ERROR_MASK);

	mutex_unlock(&pdata->lock);
}

static int fme_global_err_init(struct platform_device *pdev,
			       struct dfl_feature *feature)
{
	fme_err_mask(&pdev->dev, false);

	return 0;
}

static void fme_global_err_uinit(struct platform_device *pdev,
				 struct dfl_feature *feature)
{
	fme_err_mask(&pdev->dev, true);
}

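/*
 * Ioctls forwarded from the FME character device; presumably used by
 * userspace to query the number of error interrupts and to bind handlers
 * (eventfds in the DFL interrupt API) for error notification.
 */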
static long
fme_global_error_ioctl(struct platform_device *pdev,
		       struct dfl_feature *feature,
		       unsigned int cmd, unsigned long arg)
{
	switch (cmd) {
	case DFL_FPGA_FME_ERR_GET_IRQ_NUM:
		return dfl_feature_ioctl_get_num_irqs(pdev, feature, arg);
	case DFL_FPGA_FME_ERR_SET_IRQ:
		return dfl_feature_ioctl_set_irq(pdev, feature, arg);
	default:
		dev_dbg(&pdev->dev, "%x cmd not handled\n", cmd);
		return -ENODEV;
	}
}

const struct dfl_feature_id fme_global_err_id_table[] = {
	{.id = FME_FEATURE_ID_GLOBAL_ERR,},
	{0,}
};

const struct dfl_feature_ops fme_global_err_ops = {
	.init = fme_global_err_init,
	.uinit = fme_global_err_uinit,
	.ioctl = fme_global_error_ioctl,
};