Loading...
1// SPDX-License-Identifier: GPL-2.0-only
2/*
3 * Copyright 2011-2012 Calxeda, Inc.
4 */
5#include <linux/types.h>
6#include <linux/kernel.h>
7#include <linux/ctype.h>
8#include <linux/edac.h>
9#include <linux/interrupt.h>
10#include <linux/of.h>
11#include <linux/of_device.h>
12#include <linux/platform_device.h>
13#include <linux/uaccess.h>
14
15#include "edac_module.h"
16
17/* DDR Ctrlr Error Registers */
18
19#define HB_DDR_ECC_ERR_BASE 0x128
20#define MW_DDR_ECC_ERR_BASE 0x1b4
21
22#define HB_DDR_ECC_OPT 0x00
23#define HB_DDR_ECC_U_ERR_ADDR 0x08
24#define HB_DDR_ECC_U_ERR_STAT 0x0c
25#define HB_DDR_ECC_U_ERR_DATAL 0x10
26#define HB_DDR_ECC_U_ERR_DATAH 0x14
27#define HB_DDR_ECC_C_ERR_ADDR 0x18
28#define HB_DDR_ECC_C_ERR_STAT 0x1c
29#define HB_DDR_ECC_C_ERR_DATAL 0x20
30#define HB_DDR_ECC_C_ERR_DATAH 0x24
31
32#define HB_DDR_ECC_OPT_MODE_MASK 0x3
33#define HB_DDR_ECC_OPT_FWC 0x100
34#define HB_DDR_ECC_OPT_XOR_SHIFT 16
35
36/* DDR Ctrlr Interrupt Registers */
37
38#define HB_DDR_ECC_INT_BASE 0x180
39#define MW_DDR_ECC_INT_BASE 0x218
40
41#define HB_DDR_ECC_INT_STATUS 0x00
42#define HB_DDR_ECC_INT_ACK 0x04
43
44#define HB_DDR_ECC_INT_STAT_CE 0x8
45#define HB_DDR_ECC_INT_STAT_DOUBLE_CE 0x10
46#define HB_DDR_ECC_INT_STAT_UE 0x20
47#define HB_DDR_ECC_INT_STAT_DOUBLE_UE 0x40
48
/* Per-controller private state: the two mapped ECC register banks. */
struct hb_mc_drvdata {
	void __iomem *mc_err_base;	/* ECC error capture/option registers */
	void __iomem *mc_int_base;	/* ECC interrupt status/ack registers */
};
53
/*
 * Interrupt handler for the DDR controller ECC interrupt.
 *
 * Reads the interrupt status, reports any uncorrectable and/or correctable
 * error to the EDAC core (page/offset derived from the captured error
 * address), then acks the interrupt by writing the status bits back.
 */
static irqreturn_t highbank_mc_err_handler(int irq, void *dev_id)
{
	struct mem_ctl_info *mci = dev_id;
	struct hb_mc_drvdata *drvdata = mci->pvt_info;
	u32 status, err_addr;

	/* Read the interrupt status register */
	status = readl(drvdata->mc_int_base + HB_DDR_ECC_INT_STATUS);

	if (status & HB_DDR_ECC_INT_STAT_UE) {
		err_addr = readl(drvdata->mc_err_base + HB_DDR_ECC_U_ERR_ADDR);
		edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 1,
				     err_addr >> PAGE_SHIFT,
				     err_addr & ~PAGE_MASK, 0,
				     0, 0, -1,
				     mci->ctl_name, "");
	}
	if (status & HB_DDR_ECC_INT_STAT_CE) {
		u32 syndrome = readl(drvdata->mc_err_base + HB_DDR_ECC_C_ERR_STAT);
		/* the syndrome lives in bits [15:8] of the CE status register */
		syndrome = (syndrome >> 8) & 0xff;
		err_addr = readl(drvdata->mc_err_base + HB_DDR_ECC_C_ERR_ADDR);
		edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, 1,
				     err_addr >> PAGE_SHIFT,
				     err_addr & ~PAGE_MASK, syndrome,
				     0, 0, -1,
				     mci->ctl_name, "");
	}

	/* clear the error, clears the interrupt */
	writel(status, drvdata->mc_int_base + HB_DDR_ECC_INT_ACK);
	return IRQ_HANDLED;
}
86
/*
 * Inject an ECC error for test purposes: read-modify-write the ECC
 * option register, preserving only the mode bits, then program the XOR
 * syndrome pattern and set the force-write-check (FWC) bit.
 */
static void highbank_mc_err_inject(struct mem_ctl_info *mci, u8 synd)
{
	struct hb_mc_drvdata *pdata = mci->pvt_info;
	u32 reg;

	reg = readl(pdata->mc_err_base + HB_DDR_ECC_OPT);
	reg &= HB_DDR_ECC_OPT_MODE_MASK;	/* keep only the ECC mode bits */
	reg |= (synd << HB_DDR_ECC_OPT_XOR_SHIFT) | HB_DDR_ECC_OPT_FWC;
	writel(reg, pdata->mc_err_base + HB_DDR_ECC_OPT);
}
97
98#define to_mci(k) container_of(k, struct mem_ctl_info, dev)
99
100static ssize_t highbank_mc_inject_ctrl(struct device *dev,
101 struct device_attribute *attr, const char *buf, size_t count)
102{
103 struct mem_ctl_info *mci = to_mci(dev);
104 u8 synd;
105
106 if (kstrtou8(buf, 16, &synd))
107 return -EINVAL;
108
109 highbank_mc_err_inject(mci, synd);
110
111 return count;
112}
113
/* Writing a hex syndrome to "inject_ctrl" triggers an error injection */
static DEVICE_ATTR(inject_ctrl, S_IWUSR, NULL, highbank_mc_inject_ctrl);

static struct attribute *highbank_dev_attrs[] = {
	&dev_attr_inject_ctrl.attr,
	NULL
};

ATTRIBUTE_GROUPS(highbank_dev);
122
/*
 * Per-SoC register layout: Highbank and Midway (ECX-2000) place the ECC
 * error and interrupt register blocks at different offsets.
 */
struct hb_mc_settings {
	int err_offset;		/* offset of the ECC error register block */
	int int_offset;		/* offset of the ECC interrupt register block */
};

static struct hb_mc_settings hb_settings = {
	.err_offset = HB_DDR_ECC_ERR_BASE,
	.int_offset = HB_DDR_ECC_INT_BASE,
};

static struct hb_mc_settings mw_settings = {
	.err_offset = MW_DDR_ECC_ERR_BASE,
	.int_offset = MW_DDR_ECC_INT_BASE,
};
137
/* Supported DDR controllers; .data selects the register layout */
static const struct of_device_id hb_ddr_ctrl_of_match[] = {
	{ .compatible = "calxeda,hb-ddr-ctrl", .data = &hb_settings },
	{ .compatible = "calxeda,ecx-2000-ddr-ctrl", .data = &mw_settings },
	{},
};
MODULE_DEVICE_TABLE(of, hb_ddr_ctrl_of_match);
144
145static int highbank_mc_probe(struct platform_device *pdev)
146{
147 const struct of_device_id *id;
148 const struct hb_mc_settings *settings;
149 struct edac_mc_layer layers[2];
150 struct mem_ctl_info *mci;
151 struct hb_mc_drvdata *drvdata;
152 struct dimm_info *dimm;
153 struct resource *r;
154 void __iomem *base;
155 u32 control;
156 int irq;
157 int res = 0;
158
159 id = of_match_device(hb_ddr_ctrl_of_match, &pdev->dev);
160 if (!id)
161 return -ENODEV;
162
163 layers[0].type = EDAC_MC_LAYER_CHIP_SELECT;
164 layers[0].size = 1;
165 layers[0].is_virt_csrow = true;
166 layers[1].type = EDAC_MC_LAYER_CHANNEL;
167 layers[1].size = 1;
168 layers[1].is_virt_csrow = false;
169 mci = edac_mc_alloc(0, ARRAY_SIZE(layers), layers,
170 sizeof(struct hb_mc_drvdata));
171 if (!mci)
172 return -ENOMEM;
173
174 mci->pdev = &pdev->dev;
175 drvdata = mci->pvt_info;
176 platform_set_drvdata(pdev, mci);
177
178 if (!devres_open_group(&pdev->dev, NULL, GFP_KERNEL)) {
179 res = -ENOMEM;
180 goto free;
181 }
182
183 r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
184 if (!r) {
185 dev_err(&pdev->dev, "Unable to get mem resource\n");
186 res = -ENODEV;
187 goto err;
188 }
189
190 if (!devm_request_mem_region(&pdev->dev, r->start,
191 resource_size(r), dev_name(&pdev->dev))) {
192 dev_err(&pdev->dev, "Error while requesting mem region\n");
193 res = -EBUSY;
194 goto err;
195 }
196
197 base = devm_ioremap(&pdev->dev, r->start, resource_size(r));
198 if (!base) {
199 dev_err(&pdev->dev, "Unable to map regs\n");
200 res = -ENOMEM;
201 goto err;
202 }
203
204 settings = id->data;
205 drvdata->mc_err_base = base + settings->err_offset;
206 drvdata->mc_int_base = base + settings->int_offset;
207
208 control = readl(drvdata->mc_err_base + HB_DDR_ECC_OPT) & 0x3;
209 if (!control || (control == 0x2)) {
210 dev_err(&pdev->dev, "No ECC present, or ECC disabled\n");
211 res = -ENODEV;
212 goto err;
213 }
214
215 mci->mtype_cap = MEM_FLAG_DDR3;
216 mci->edac_ctl_cap = EDAC_FLAG_NONE | EDAC_FLAG_SECDED;
217 mci->edac_cap = EDAC_FLAG_SECDED;
218 mci->mod_name = pdev->dev.driver->name;
219 mci->ctl_name = id->compatible;
220 mci->dev_name = dev_name(&pdev->dev);
221 mci->scrub_mode = SCRUB_SW_SRC;
222
223 /* Only a single 4GB DIMM is supported */
224 dimm = *mci->dimms;
225 dimm->nr_pages = (~0UL >> PAGE_SHIFT) + 1;
226 dimm->grain = 8;
227 dimm->dtype = DEV_X8;
228 dimm->mtype = MEM_DDR3;
229 dimm->edac_mode = EDAC_SECDED;
230
231 res = edac_mc_add_mc_with_groups(mci, highbank_dev_groups);
232 if (res < 0)
233 goto err;
234
235 irq = platform_get_irq(pdev, 0);
236 res = devm_request_irq(&pdev->dev, irq, highbank_mc_err_handler,
237 0, dev_name(&pdev->dev), mci);
238 if (res < 0) {
239 dev_err(&pdev->dev, "Unable to request irq %d\n", irq);
240 goto err2;
241 }
242
243 devres_close_group(&pdev->dev, NULL);
244 return 0;
245err2:
246 edac_mc_del_mc(&pdev->dev);
247err:
248 devres_release_group(&pdev->dev, NULL);
249free:
250 edac_mc_free(mci);
251 return res;
252}
253
/* Tear down: unregister from the EDAC core and free the mem_ctl_info. */
static void highbank_mc_remove(struct platform_device *pdev)
{
	struct mem_ctl_info *mci = platform_get_drvdata(pdev);

	edac_mc_del_mc(&pdev->dev);
	edac_mc_free(mci);
}
261
/* Platform driver glue; devices are matched via the OF table above. */
static struct platform_driver highbank_mc_edac_driver = {
	.probe = highbank_mc_probe,
	.remove = highbank_mc_remove,
	.driver = {
		.name = "hb_mc_edac",
		.of_match_table = hb_ddr_ctrl_of_match,
	},
};
270
271module_platform_driver(highbank_mc_edac_driver);
272
273MODULE_LICENSE("GPL v2");
274MODULE_AUTHOR("Calxeda, Inc.");
275MODULE_DESCRIPTION("EDAC Driver for Calxeda Highbank");
1/*
2 * Copyright 2011-2012 Calxeda, Inc.
3 *
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms and conditions of the GNU General Public License,
6 * version 2, as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope it will be useful, but WITHOUT
9 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
11 * more details.
12 *
13 * You should have received a copy of the GNU General Public License along with
14 * this program. If not, see <http://www.gnu.org/licenses/>.
15 */
16#include <linux/types.h>
17#include <linux/kernel.h>
18#include <linux/ctype.h>
19#include <linux/edac.h>
20#include <linux/interrupt.h>
21#include <linux/platform_device.h>
22#include <linux/of_platform.h>
23#include <linux/uaccess.h>
24
25#include "edac_module.h"
26
27/* DDR Ctrlr Error Registers */
28
29#define HB_DDR_ECC_ERR_BASE 0x128
30#define MW_DDR_ECC_ERR_BASE 0x1b4
31
32#define HB_DDR_ECC_OPT 0x00
33#define HB_DDR_ECC_U_ERR_ADDR 0x08
34#define HB_DDR_ECC_U_ERR_STAT 0x0c
35#define HB_DDR_ECC_U_ERR_DATAL 0x10
36#define HB_DDR_ECC_U_ERR_DATAH 0x14
37#define HB_DDR_ECC_C_ERR_ADDR 0x18
38#define HB_DDR_ECC_C_ERR_STAT 0x1c
39#define HB_DDR_ECC_C_ERR_DATAL 0x20
40#define HB_DDR_ECC_C_ERR_DATAH 0x24
41
42#define HB_DDR_ECC_OPT_MODE_MASK 0x3
43#define HB_DDR_ECC_OPT_FWC 0x100
44#define HB_DDR_ECC_OPT_XOR_SHIFT 16
45
46/* DDR Ctrlr Interrupt Registers */
47
48#define HB_DDR_ECC_INT_BASE 0x180
49#define MW_DDR_ECC_INT_BASE 0x218
50
51#define HB_DDR_ECC_INT_STATUS 0x00
52#define HB_DDR_ECC_INT_ACK 0x04
53
54#define HB_DDR_ECC_INT_STAT_CE 0x8
55#define HB_DDR_ECC_INT_STAT_DOUBLE_CE 0x10
56#define HB_DDR_ECC_INT_STAT_UE 0x20
57#define HB_DDR_ECC_INT_STAT_DOUBLE_UE 0x40
58
59struct hb_mc_drvdata {
60 void __iomem *mc_err_base;
61 void __iomem *mc_int_base;
62};
63
/*
 * Interrupt handler for the DDR controller ECC interrupt.
 *
 * Reads the interrupt status, reports any uncorrectable and/or correctable
 * error to the EDAC core (page/offset derived from the captured error
 * address), then acks the interrupt by writing the status bits back.
 */
static irqreturn_t highbank_mc_err_handler(int irq, void *dev_id)
{
	struct mem_ctl_info *mci = dev_id;
	struct hb_mc_drvdata *drvdata = mci->pvt_info;
	u32 status, err_addr;

	/* Read the interrupt status register */
	status = readl(drvdata->mc_int_base + HB_DDR_ECC_INT_STATUS);

	if (status & HB_DDR_ECC_INT_STAT_UE) {
		err_addr = readl(drvdata->mc_err_base + HB_DDR_ECC_U_ERR_ADDR);
		edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 1,
				     err_addr >> PAGE_SHIFT,
				     err_addr & ~PAGE_MASK, 0,
				     0, 0, -1,
				     mci->ctl_name, "");
	}
	if (status & HB_DDR_ECC_INT_STAT_CE) {
		u32 syndrome = readl(drvdata->mc_err_base + HB_DDR_ECC_C_ERR_STAT);
		/* the syndrome lives in bits [15:8] of the CE status register */
		syndrome = (syndrome >> 8) & 0xff;
		err_addr = readl(drvdata->mc_err_base + HB_DDR_ECC_C_ERR_ADDR);
		edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, 1,
				     err_addr >> PAGE_SHIFT,
				     err_addr & ~PAGE_MASK, syndrome,
				     0, 0, -1,
				     mci->ctl_name, "");
	}

	/* clear the error, clears the interrupt */
	writel(status, drvdata->mc_int_base + HB_DDR_ECC_INT_ACK);
	return IRQ_HANDLED;
}
96
/*
 * Inject an ECC error for test purposes: read-modify-write the ECC
 * option register, preserving only the mode bits, then program the XOR
 * syndrome pattern and set the force-write-check (FWC) bit.
 */
static void highbank_mc_err_inject(struct mem_ctl_info *mci, u8 synd)
{
	struct hb_mc_drvdata *pdata = mci->pvt_info;
	u32 reg;

	reg = readl(pdata->mc_err_base + HB_DDR_ECC_OPT);
	reg &= HB_DDR_ECC_OPT_MODE_MASK;	/* keep only the ECC mode bits */
	reg |= (synd << HB_DDR_ECC_OPT_XOR_SHIFT) | HB_DDR_ECC_OPT_FWC;
	writel(reg, pdata->mc_err_base + HB_DDR_ECC_OPT);
}
107
108#define to_mci(k) container_of(k, struct mem_ctl_info, dev)
109
/*
 * sysfs store handler for the "inject_ctrl" attribute: parses a hex
 * syndrome value from userspace and forces an error injection with it.
 */
static ssize_t highbank_mc_inject_ctrl(struct device *dev,
	struct device_attribute *attr, const char *buf, size_t count)
{
	struct mem_ctl_info *mci = to_mci(dev);
	u8 synd;

	if (kstrtou8(buf, 16, &synd))
		return -EINVAL;

	highbank_mc_err_inject(mci, synd);

	return count;
}
123
124static DEVICE_ATTR(inject_ctrl, S_IWUSR, NULL, highbank_mc_inject_ctrl);
125
126static struct attribute *highbank_dev_attrs[] = {
127 &dev_attr_inject_ctrl.attr,
128 NULL
129};
130
131ATTRIBUTE_GROUPS(highbank_dev);
132
133struct hb_mc_settings {
134 int err_offset;
135 int int_offset;
136};
137
138static struct hb_mc_settings hb_settings = {
139 .err_offset = HB_DDR_ECC_ERR_BASE,
140 .int_offset = HB_DDR_ECC_INT_BASE,
141};
142
143static struct hb_mc_settings mw_settings = {
144 .err_offset = MW_DDR_ECC_ERR_BASE,
145 .int_offset = MW_DDR_ECC_INT_BASE,
146};
147
148static const struct of_device_id hb_ddr_ctrl_of_match[] = {
149 { .compatible = "calxeda,hb-ddr-ctrl", .data = &hb_settings },
150 { .compatible = "calxeda,ecx-2000-ddr-ctrl", .data = &mw_settings },
151 {},
152};
153MODULE_DEVICE_TABLE(of, hb_ddr_ctrl_of_match);
154
155static int highbank_mc_probe(struct platform_device *pdev)
156{
157 const struct of_device_id *id;
158 const struct hb_mc_settings *settings;
159 struct edac_mc_layer layers[2];
160 struct mem_ctl_info *mci;
161 struct hb_mc_drvdata *drvdata;
162 struct dimm_info *dimm;
163 struct resource *r;
164 void __iomem *base;
165 u32 control;
166 int irq;
167 int res = 0;
168
169 id = of_match_device(hb_ddr_ctrl_of_match, &pdev->dev);
170 if (!id)
171 return -ENODEV;
172
173 layers[0].type = EDAC_MC_LAYER_CHIP_SELECT;
174 layers[0].size = 1;
175 layers[0].is_virt_csrow = true;
176 layers[1].type = EDAC_MC_LAYER_CHANNEL;
177 layers[1].size = 1;
178 layers[1].is_virt_csrow = false;
179 mci = edac_mc_alloc(0, ARRAY_SIZE(layers), layers,
180 sizeof(struct hb_mc_drvdata));
181 if (!mci)
182 return -ENOMEM;
183
184 mci->pdev = &pdev->dev;
185 drvdata = mci->pvt_info;
186 platform_set_drvdata(pdev, mci);
187
188 if (!devres_open_group(&pdev->dev, NULL, GFP_KERNEL))
189 return -ENOMEM;
190
191 r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
192 if (!r) {
193 dev_err(&pdev->dev, "Unable to get mem resource\n");
194 res = -ENODEV;
195 goto err;
196 }
197
198 if (!devm_request_mem_region(&pdev->dev, r->start,
199 resource_size(r), dev_name(&pdev->dev))) {
200 dev_err(&pdev->dev, "Error while requesting mem region\n");
201 res = -EBUSY;
202 goto err;
203 }
204
205 base = devm_ioremap(&pdev->dev, r->start, resource_size(r));
206 if (!base) {
207 dev_err(&pdev->dev, "Unable to map regs\n");
208 res = -ENOMEM;
209 goto err;
210 }
211
212 settings = id->data;
213 drvdata->mc_err_base = base + settings->err_offset;
214 drvdata->mc_int_base = base + settings->int_offset;
215
216 control = readl(drvdata->mc_err_base + HB_DDR_ECC_OPT) & 0x3;
217 if (!control || (control == 0x2)) {
218 dev_err(&pdev->dev, "No ECC present, or ECC disabled\n");
219 res = -ENODEV;
220 goto err;
221 }
222
223 mci->mtype_cap = MEM_FLAG_DDR3;
224 mci->edac_ctl_cap = EDAC_FLAG_NONE | EDAC_FLAG_SECDED;
225 mci->edac_cap = EDAC_FLAG_SECDED;
226 mci->mod_name = pdev->dev.driver->name;
227 mci->mod_ver = "1";
228 mci->ctl_name = id->compatible;
229 mci->dev_name = dev_name(&pdev->dev);
230 mci->scrub_mode = SCRUB_SW_SRC;
231
232 /* Only a single 4GB DIMM is supported */
233 dimm = *mci->dimms;
234 dimm->nr_pages = (~0UL >> PAGE_SHIFT) + 1;
235 dimm->grain = 8;
236 dimm->dtype = DEV_X8;
237 dimm->mtype = MEM_DDR3;
238 dimm->edac_mode = EDAC_SECDED;
239
240 res = edac_mc_add_mc_with_groups(mci, highbank_dev_groups);
241 if (res < 0)
242 goto err;
243
244 irq = platform_get_irq(pdev, 0);
245 res = devm_request_irq(&pdev->dev, irq, highbank_mc_err_handler,
246 0, dev_name(&pdev->dev), mci);
247 if (res < 0) {
248 dev_err(&pdev->dev, "Unable to request irq %d\n", irq);
249 goto err2;
250 }
251
252 devres_close_group(&pdev->dev, NULL);
253 return 0;
254err2:
255 edac_mc_del_mc(&pdev->dev);
256err:
257 devres_release_group(&pdev->dev, NULL);
258 edac_mc_free(mci);
259 return res;
260}
261
/* Tear down: unregister from the EDAC core and free the mem_ctl_info. */
static int highbank_mc_remove(struct platform_device *pdev)
{
	struct mem_ctl_info *mci = platform_get_drvdata(pdev);

	edac_mc_del_mc(&pdev->dev);
	edac_mc_free(mci);
	return 0;
}
270
271static struct platform_driver highbank_mc_edac_driver = {
272 .probe = highbank_mc_probe,
273 .remove = highbank_mc_remove,
274 .driver = {
275 .name = "hb_mc_edac",
276 .of_match_table = hb_ddr_ctrl_of_match,
277 },
278};
279
280module_platform_driver(highbank_mc_edac_driver);
281
282MODULE_LICENSE("GPL v2");
283MODULE_AUTHOR("Calxeda, Inc.");
284MODULE_DESCRIPTION("EDAC Driver for Calxeda Highbank");