// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
/* Copyright 2017-2019 NXP */

#include "enetc.h"

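/* Typical lifecycle of the control BD ring (illustrative sketch only;
 * the BD count of 64 and the enclosing si/priv structures are
 * assumptions, not defined in this file):
 *
 *	err = enetc_setup_cbdr(priv->dev, &si->hw, 64, &si->cbd_ring);
 *	if (err)
 *		return err;
 *	...issue commands with enetc_send_cmd()...
 *	enetc_teardown_cbdr(&si->cbd_ring);
 */
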
int enetc_setup_cbdr(struct device *dev, struct enetc_hw *hw, int bd_count,
		     struct enetc_cbdr *cbdr)
{
	int size = bd_count * sizeof(struct enetc_cbd);

	cbdr->bd_base = dma_alloc_coherent(dev, size, &cbdr->bd_dma_base,
					   GFP_KERNEL);
	if (!cbdr->bd_base)
		return -ENOMEM;

	/* h/w requires 128B alignment */
	if (!IS_ALIGNED(cbdr->bd_dma_base, 128)) {
		dma_free_coherent(dev, size, cbdr->bd_base,
				  cbdr->bd_dma_base);
		return -EINVAL;
	}

	cbdr->next_to_clean = 0;
	cbdr->next_to_use = 0;
	cbdr->dma_dev = dev;
	cbdr->bd_count = bd_count;

	cbdr->pir = hw->reg + ENETC_SICBDRPIR;
	cbdr->cir = hw->reg + ENETC_SICBDRCIR;
	cbdr->mr = hw->reg + ENETC_SICBDRMR;

	/* set CBDR cache attributes */
	enetc_wr(hw, ENETC_SICAR2,
		 ENETC_SICAR_RD_COHERENT | ENETC_SICAR_WR_COHERENT);

	enetc_wr(hw, ENETC_SICBDRBAR0, lower_32_bits(cbdr->bd_dma_base));
	enetc_wr(hw, ENETC_SICBDRBAR1, upper_32_bits(cbdr->bd_dma_base));
	enetc_wr(hw, ENETC_SICBDRLENR, ENETC_RTBLENR_LEN(cbdr->bd_count));

	enetc_wr_reg(cbdr->pir, cbdr->next_to_clean);
	enetc_wr_reg(cbdr->cir, cbdr->next_to_use);
	/* enable ring */
	enetc_wr_reg(cbdr->mr, BIT(31));

	return 0;
}
EXPORT_SYMBOL_GPL(enetc_setup_cbdr);

void enetc_teardown_cbdr(struct enetc_cbdr *cbdr)
{
	int size = cbdr->bd_count * sizeof(struct enetc_cbd);

	/* disable ring */
	enetc_wr_reg(cbdr->mr, 0);

	dma_free_coherent(cbdr->dma_dev, size, cbdr->bd_base,
			  cbdr->bd_dma_base);
	cbdr->bd_base = NULL;
	cbdr->dma_dev = NULL;
}
EXPORT_SYMBOL_GPL(enetc_teardown_cbdr);

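/* Reclaim completed control BDs: walk from next_to_clean up to the
 * hardware consumer index, warn if the device reported an error status,
 * and zero each descriptor so its slot can be reused.
 */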
static void enetc_clean_cbdr(struct enetc_cbdr *ring)
{
	struct enetc_cbd *dest_cbd;
	int i, status;

	i = ring->next_to_clean;

	while (enetc_rd_reg(ring->cir) != i) {
		dest_cbd = ENETC_CBD(*ring, i);
		status = dest_cbd->status_flags & ENETC_CBD_STATUS_MASK;
		if (status)
			dev_warn(ring->dma_dev, "CMD err %04x for cmd %04x\n",
				 status, dest_cbd->cmd);

		memset(dest_cbd, 0, sizeof(*dest_cbd));

		i = (i + 1) % ring->bd_count;
	}

	ring->next_to_clean = i;
}

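/* Number of free BDs in the ring; one slot is always left unused so
 * that a full ring can be distinguished from an empty one.
 */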
static int enetc_cbd_unused(struct enetc_cbdr *r)
{
	return (r->next_to_clean - r->next_to_use - 1 + r->bd_count) %
		r->bd_count;
}

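/* Enqueue one command BD at the producer index, notify the hardware by
 * writing the producer index register, then busy-poll the consumer
 * index until the device has consumed the command or the timeout
 * expires.  Called under rtnl_lock(), so it must not sleep.
 */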
int enetc_send_cmd(struct enetc_si *si, struct enetc_cbd *cbd)
{
	struct enetc_cbdr *ring = &si->cbd_ring;
	int timeout = ENETC_CBDR_TIMEOUT;
	struct enetc_cbd *dest_cbd;
	int i;

	if (unlikely(!ring->bd_base))
		return -EIO;

	if (unlikely(!enetc_cbd_unused(ring)))
		enetc_clean_cbdr(ring);

	i = ring->next_to_use;
	dest_cbd = ENETC_CBD(*ring, i);

	/* copy command to the ring */
	*dest_cbd = *cbd;
	i = (i + 1) % ring->bd_count;

	ring->next_to_use = i;
	/* let H/W know BD ring has been updated */
	enetc_wr_reg(ring->pir, i);

	do {
		if (enetc_rd_reg(ring->cir) == i)
			break;
		udelay(10); /* cannot sleep, rtnl_lock() */
		timeout -= 10;
	} while (timeout);

	if (!timeout)
		return -EBUSY;

	/* CBD may writeback data, feedback up level */
	*cbd = *dest_cbd;

	enetc_clean_cbdr(ring);

	return 0;
}
EXPORT_SYMBOL_GPL(enetc_send_cmd);

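/* Clear (invalidate) the MAC filter table entry at @index */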
int enetc_clear_mac_flt_entry(struct enetc_si *si, int index)
{
	struct enetc_cbd cbd;

	memset(&cbd, 0, sizeof(cbd));

	cbd.cls = 1;
	cbd.status_flags = ENETC_CBD_FLAGS_SF;
	cbd.index = cpu_to_le16(index);

	return enetc_send_cmd(si, &cbd);
}
EXPORT_SYMBOL_GPL(enetc_clear_mac_flt_entry);

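/* Program a MAC filter table entry: the first four bytes of @mac_addr
 * go into addr[0] and the last two into addr[1], @si_map selects the
 * station interfaces the filter applies to, and BIT(31) in opt[0]
 * enables the entry.
 */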
int enetc_set_mac_flt_entry(struct enetc_si *si, int index,
			    char *mac_addr, int si_map)
{
	struct enetc_cbd cbd;
	u32 upper;
	u16 lower;

	memset(&cbd, 0, sizeof(cbd));

	/* fill up the "set" descriptor */
	cbd.cls = 1;
	cbd.status_flags = ENETC_CBD_FLAGS_SF;
	cbd.index = cpu_to_le16(index);
	cbd.opt[3] = cpu_to_le32(si_map);
	/* enable entry */
	cbd.opt[0] = cpu_to_le32(BIT(31));

	upper = *(const u32 *)mac_addr;
	lower = *(const u16 *)(mac_addr + 4);
	cbd.addr[0] = cpu_to_le32(upper);
	cbd.addr[1] = cpu_to_le32(lower);

	return enetc_send_cmd(si, &cbd);
}
EXPORT_SYMBOL_GPL(enetc_set_mac_flt_entry);

/* Set entry in RFS table */
int enetc_set_fs_entry(struct enetc_si *si, struct enetc_cmd_rfse *rfse,
		       int index)
{
	struct enetc_cbdr *ring = &si->cbd_ring;
	struct enetc_cbd cbd = {.cmd = 0};
	void *tmp, *tmp_align;
	dma_addr_t dma;
	int err;

	/* fill up the "set" descriptor */
	cbd.cmd = 0;
	cbd.cls = 4;
	cbd.index = cpu_to_le16(index);
	cbd.opt[3] = cpu_to_le32(0); /* SI */

	tmp = enetc_cbd_alloc_data_mem(si, &cbd, sizeof(*rfse),
				       &dma, &tmp_align);
	if (!tmp)
		return -ENOMEM;

	memcpy(tmp_align, rfse, sizeof(*rfse));

	err = enetc_send_cmd(si, &cbd);
	if (err)
		dev_err(ring->dma_dev, "FS entry add failed (%d)!", err);

	enetc_cbd_free_data_mem(si, sizeof(*rfse), tmp, &dma);

	return err;
}
EXPORT_SYMBOL_GPL(enetc_set_fs_entry);

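/* Read or write the RSS indirection table via a class 3 command.  The
 * table travels through a DMA data buffer; each 32-bit entry is
 * narrowed to one byte on write and widened back on read.
 */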
static int enetc_cmd_rss_table(struct enetc_si *si, u32 *table, int count,
			       bool read)
{
	struct enetc_cbdr *ring = &si->cbd_ring;
	struct enetc_cbd cbd = {.cmd = 0};
	u8 *tmp, *tmp_align;
	dma_addr_t dma;
	int err, i;

	if (count < ENETC_CBD_DATA_MEM_ALIGN)
		/* HW only takes in a full 64 entry table */
		return -EINVAL;

	tmp = enetc_cbd_alloc_data_mem(si, &cbd, count,
				       &dma, (void *)&tmp_align);
	if (!tmp)
		return -ENOMEM;

	if (!read)
		for (i = 0; i < count; i++)
			tmp_align[i] = (u8)(table[i]);

	/* fill up the descriptor */
	cbd.cmd = read ? 2 : 1;
	cbd.cls = 3;

	err = enetc_send_cmd(si, &cbd);
	if (err)
		dev_err(ring->dma_dev, "RSS cmd failed (%d)!", err);

	if (read)
		for (i = 0; i < count; i++)
			table[i] = tmp_align[i];

	enetc_cbd_free_data_mem(si, count, tmp, &dma);

	return err;
}

/* Get RSS table */
int enetc_get_rss_table(struct enetc_si *si, u32 *table, int count)
{
	return enetc_cmd_rss_table(si, table, count, true);
}
EXPORT_SYMBOL_GPL(enetc_get_rss_table);

/* Set RSS table */
int enetc_set_rss_table(struct enetc_si *si, const u32 *table, int count)
{
	return enetc_cmd_rss_table(si, (u32 *)table, count, false);
}
EXPORT_SYMBOL_GPL(enetc_set_rss_table);