// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
/* Copyright(c) 2014 - 2021 Intel Corporation */
#include <adf_accel_devices.h>
#include <adf_admin.h>
#include <adf_common_drv.h>
#include <adf_gen2_config.h>
#include <adf_gen2_dc.h>
#include <adf_gen2_hw_data.h>
#include <adf_gen2_pfvf.h>
#include "adf_dh895xcc_hw_data.h"
#include "adf_heartbeat.h"
#include "icp_qat_hw.h"

#define ADF_DH895XCC_VF_MSK 0xFFFFFFFF

/*
 * Worker thread to service arbiter mappings. One configuration word per
 * accel engine, consumed by adf_init_arb() through the get_arb_mapping
 * callback below to program the worker-thread-to-service-arbiter map.
 */
static const u32 thrd_to_arb_map[ADF_DH895XCC_MAX_ACCELENGINES] = {
        0x12222AAA, 0x11666666, 0x12222AAA, 0x11666666,
        0x12222AAA, 0x11222222, 0x12222AAA, 0x11222222,
        0x12222AAA, 0x11222222, 0x12222AAA, 0x11222222
};

static struct adf_hw_device_class dh895xcc_class = {
        .name = ADF_DH895XCC_DEVICE_NAME,
        .type = DEV_DH895XCC,
        .instances = 0
};

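/*
 * In the fuse word, a set bit marks an accelerator or accel engine as
 * disabled, hence the inversion when deriving the enabled-unit masks below.
 */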
static u32 get_accel_mask(struct adf_hw_device_data *self)
{
        u32 fuses = self->fuses;

        return ~fuses >> ADF_DH895XCC_ACCELERATORS_REG_OFFSET &
                         ADF_DH895XCC_ACCELERATORS_MASK;
}

static u32 get_ae_mask(struct adf_hw_device_data *self)
{
        u32 fuses = self->fuses;

        return ~fuses & ADF_DH895XCC_ACCELENGINES_MASK;
}

static u32 get_misc_bar_id(struct adf_hw_device_data *self)
{
        return ADF_DH895XCC_PMISC_BAR;
}

static u32 get_ts_clock(struct adf_hw_device_data *self)
{
        /*
         * Timestamp update interval is 16 AE clock ticks for dh895xcc.
         */
        return self->clock_frequency / 16;
}
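/*
 * For reference: clock_frequency is set to ADF_DH895X_AE_FREQ in
 * adf_init_hw_data_dh895xcc() below. Assuming a 933 MHz AE clock, the
 * heartbeat timestamp counter advances at roughly 933 MHz / 16 ~= 58.3 MHz.
 */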

static u32 get_etr_bar_id(struct adf_hw_device_data *self)
{
        return ADF_DH895XCC_ETR_BAR;
}

static u32 get_sram_bar_id(struct adf_hw_device_data *self)
{
        return ADF_DH895XCC_SRAM_BAR;
}

static u32 get_accel_cap(struct adf_accel_dev *accel_dev)
{
        struct pci_dev *pdev = accel_dev->accel_pci_dev.pci_dev;
        u32 capabilities;
        u32 legfuses;

        capabilities = ICP_ACCEL_CAPABILITIES_CRYPTO_SYMMETRIC |
                       ICP_ACCEL_CAPABILITIES_CRYPTO_ASYMMETRIC |
                       ICP_ACCEL_CAPABILITIES_AUTHENTICATION |
                       ICP_ACCEL_CAPABILITIES_CIPHER |
                       ICP_ACCEL_CAPABILITIES_COMPRESSION;

        /* Read accelerator capabilities mask */
        pci_read_config_dword(pdev, ADF_DEVICE_LEGFUSE_OFFSET, &legfuses);

        /* A set bit in legfuses means the feature is OFF in this SKU */
        if (legfuses & ICP_ACCEL_MASK_CIPHER_SLICE) {
                capabilities &= ~ICP_ACCEL_CAPABILITIES_CRYPTO_SYMMETRIC;
                capabilities &= ~ICP_ACCEL_CAPABILITIES_CIPHER;
        }
        if (legfuses & ICP_ACCEL_MASK_PKE_SLICE)
                capabilities &= ~ICP_ACCEL_CAPABILITIES_CRYPTO_ASYMMETRIC;
        if (legfuses & ICP_ACCEL_MASK_AUTH_SLICE) {
                capabilities &= ~ICP_ACCEL_CAPABILITIES_AUTHENTICATION;
                capabilities &= ~ICP_ACCEL_CAPABILITIES_CIPHER;
        }
        if (legfuses & ICP_ACCEL_MASK_COMPRESS_SLICE)
                capabilities &= ~ICP_ACCEL_CAPABILITIES_COMPRESSION;

        return capabilities;
}
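/*
 * Example: on a SKU with the compression slice fused off
 * (ICP_ACCEL_MASK_COMPRESS_SLICE set in legfuses), the device reports no
 * COMPRESSION capability; note that CIPHER is dropped when either the
 * cipher or the auth slice is fused off.
 */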

static enum dev_sku_info get_sku(struct adf_hw_device_data *self)
{
        int sku = (self->fuses & ADF_DH895XCC_FUSECTL_SKU_MASK)
            >> ADF_DH895XCC_FUSECTL_SKU_SHIFT;

        switch (sku) {
        case ADF_DH895XCC_FUSECTL_SKU_1:
                return DEV_SKU_1;
        case ADF_DH895XCC_FUSECTL_SKU_2:
                return DEV_SKU_2;
        case ADF_DH895XCC_FUSECTL_SKU_3:
                return DEV_SKU_3;
        case ADF_DH895XCC_FUSECTL_SKU_4:
                return DEV_SKU_4;
        default:
                return DEV_SKU_UNKNOWN;
        }
}

static const u32 *adf_get_arbiter_mapping(struct adf_accel_dev *accel_dev)
{
        return thrd_to_arb_map;
}

static void enable_vf2pf_interrupts(void __iomem *pmisc_addr, u32 vf_mask)
{
        /* Enable VF2PF Messaging Ints - VFs 0 through 15 per vf_mask[15:0] */
        if (vf_mask & 0xFFFF) {
                u32 val = ADF_CSR_RD(pmisc_addr, ADF_GEN2_ERRMSK3)
                          & ~ADF_DH895XCC_ERR_MSK_VF2PF_L(vf_mask);
                ADF_CSR_WR(pmisc_addr, ADF_GEN2_ERRMSK3, val);
        }

        /* Enable VF2PF Messaging Ints - VFs 16 through 31 per vf_mask[31:16] */
        if (vf_mask >> 16) {
                u32 val = ADF_CSR_RD(pmisc_addr, ADF_GEN2_ERRMSK5)
                          & ~ADF_DH895XCC_ERR_MSK_VF2PF_U(vf_mask);
                ADF_CSR_WR(pmisc_addr, ADF_GEN2_ERRMSK5, val);
        }
}
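/*
 * Example: a vf_mask of BIT(3) | BIT(17) clears the VF3 mask bit in ERRMSK3
 * and the VF17 mask bit in ERRMSK5 above; VF2PF interrupts are enabled by
 * clearing mask bits and disabled by setting them.
 */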

static void disable_all_vf2pf_interrupts(void __iomem *pmisc_addr)
{
        u32 val;

        /* Disable VF2PF interrupts for VFs 0 through 15 per vf_mask[15:0] */
        val = ADF_CSR_RD(pmisc_addr, ADF_GEN2_ERRMSK3)
              | ADF_DH895XCC_ERR_MSK_VF2PF_L(ADF_DH895XCC_VF_MSK);
        ADF_CSR_WR(pmisc_addr, ADF_GEN2_ERRMSK3, val);

        /* Disable VF2PF interrupts for VFs 16 through 31 per vf_mask[31:16] */
        val = ADF_CSR_RD(pmisc_addr, ADF_GEN2_ERRMSK5)
              | ADF_DH895XCC_ERR_MSK_VF2PF_U(ADF_DH895XCC_VF_MSK);
        ADF_CSR_WR(pmisc_addr, ADF_GEN2_ERRMSK5, val);
}

static u32 disable_pending_vf2pf_interrupts(void __iomem *pmisc_addr)
{
        u32 sources, pending, disabled;
        u32 errsou3, errmsk3;
        u32 errsou5, errmsk5;

        /* Get the interrupt sources triggered by VFs */
        errsou3 = ADF_CSR_RD(pmisc_addr, ADF_GEN2_ERRSOU3);
        errsou5 = ADF_CSR_RD(pmisc_addr, ADF_GEN2_ERRSOU5);
        sources = ADF_DH895XCC_ERR_REG_VF2PF_L(errsou3)
                  | ADF_DH895XCC_ERR_REG_VF2PF_U(errsou5);

        if (!sources)
                return 0;

        /* Get the already disabled interrupts */
        errmsk3 = ADF_CSR_RD(pmisc_addr, ADF_GEN2_ERRMSK3);
        errmsk5 = ADF_CSR_RD(pmisc_addr, ADF_GEN2_ERRMSK5);
        disabled = ADF_DH895XCC_ERR_REG_VF2PF_L(errmsk3)
                   | ADF_DH895XCC_ERR_REG_VF2PF_U(errmsk5);

        pending = sources & ~disabled;
        if (!pending)
                return 0;

        /* Due to HW limitations, when disabling the interrupts, we can't
         * just disable the requested sources, as this would lead to missed
         * interrupts if sources change just before writing to ERRMSK3 and
         * ERRMSK5.
         * To work around it, disable all and re-enable only the sources that
         * are neither pending nor already disabled. Re-enabling will trigger
         * a new interrupt for the sources that have changed in the meantime,
         * if any.
         */
        errmsk3 |= ADF_DH895XCC_ERR_MSK_VF2PF_L(ADF_DH895XCC_VF_MSK);
        errmsk5 |= ADF_DH895XCC_ERR_MSK_VF2PF_U(ADF_DH895XCC_VF_MSK);
        ADF_CSR_WR(pmisc_addr, ADF_GEN2_ERRMSK3, errmsk3);
        ADF_CSR_WR(pmisc_addr, ADF_GEN2_ERRMSK5, errmsk5);

        errmsk3 &= ADF_DH895XCC_ERR_MSK_VF2PF_L(sources | disabled);
        errmsk5 &= ADF_DH895XCC_ERR_MSK_VF2PF_U(sources | disabled);
        ADF_CSR_WR(pmisc_addr, ADF_GEN2_ERRMSK3, errmsk3);
        ADF_CSR_WR(pmisc_addr, ADF_GEN2_ERRMSK5, errmsk5);

        /* Return the sources of the (new) interrupt(s) */
        return pending;
}
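/*
 * Example: if ERRSOU3/ERRSOU5 report sources for VF0 and VF2 while VF0 is
 * already masked (disabled), then pending = {VF2}. All 32 VF2PF interrupts
 * are masked, then every VF except VF0 and VF2 is unmasked again, so the
 * caller gets {VF2} back and VF0/VF2 stay disabled until explicitly
 * re-enabled.
 */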

static void configure_iov_threads(struct adf_accel_dev *accel_dev, bool enable)
{
        adf_gen2_cfg_iov_thds(accel_dev, enable,
                              ADF_DH895XCC_AE2FUNC_MAP_GRP_A_NUM_REGS,
                              ADF_DH895XCC_AE2FUNC_MAP_GRP_B_NUM_REGS);
}

void adf_init_hw_data_dh895xcc(struct adf_hw_device_data *hw_data)
{
        hw_data->dev_class = &dh895xcc_class;
        hw_data->instance_id = dh895xcc_class.instances++;
        hw_data->num_banks = ADF_DH895XCC_ETR_MAX_BANKS;
        hw_data->num_rings_per_bank = ADF_ETR_MAX_RINGS_PER_BANK;
        hw_data->num_accel = ADF_DH895XCC_MAX_ACCELERATORS;
        hw_data->num_logical_accel = 1;
        hw_data->num_engines = ADF_DH895XCC_MAX_ACCELENGINES;
        hw_data->tx_rx_gap = ADF_GEN2_RX_RINGS_OFFSET;
        hw_data->tx_rings_mask = ADF_GEN2_TX_RINGS_MASK;
        hw_data->ring_to_svc_map = ADF_GEN2_DEFAULT_RING_TO_SRV_MAP;
        hw_data->alloc_irq = adf_isr_resource_alloc;
        hw_data->free_irq = adf_isr_resource_free;
        hw_data->enable_error_correction = adf_gen2_enable_error_correction;
        hw_data->get_accel_mask = get_accel_mask;
        hw_data->get_ae_mask = get_ae_mask;
        hw_data->get_accel_cap = get_accel_cap;
        hw_data->get_num_accels = adf_gen2_get_num_accels;
        hw_data->get_num_aes = adf_gen2_get_num_aes;
        hw_data->get_etr_bar_id = get_etr_bar_id;
        hw_data->get_misc_bar_id = get_misc_bar_id;
        hw_data->get_admin_info = adf_gen2_get_admin_info;
        hw_data->get_arb_info = adf_gen2_get_arb_info;
        hw_data->get_sram_bar_id = get_sram_bar_id;
        hw_data->get_sku = get_sku;
        hw_data->fw_name = ADF_DH895XCC_FW;
        hw_data->fw_mmp_name = ADF_DH895XCC_MMP;
        hw_data->init_admin_comms = adf_init_admin_comms;
        hw_data->exit_admin_comms = adf_exit_admin_comms;
        hw_data->configure_iov_threads = configure_iov_threads;
        hw_data->send_admin_init = adf_send_admin_init;
        hw_data->init_arb = adf_init_arb;
        hw_data->exit_arb = adf_exit_arb;
        hw_data->get_arb_mapping = adf_get_arbiter_mapping;
        hw_data->enable_ints = adf_gen2_enable_ints;
        hw_data->reset_device = adf_reset_sbr;
        hw_data->disable_iov = adf_disable_sriov;
        hw_data->dev_config = adf_gen2_dev_config;
        hw_data->clock_frequency = ADF_DH895X_AE_FREQ;
        hw_data->get_hb_clock = get_ts_clock;
        hw_data->num_hb_ctrs = ADF_NUM_HB_CNT_PER_AE;
        hw_data->check_hb_ctrs = adf_heartbeat_check_ctrs;

        adf_gen2_init_pf_pfvf_ops(&hw_data->pfvf_ops);
        hw_data->pfvf_ops.enable_vf2pf_interrupts = enable_vf2pf_interrupts;
        hw_data->pfvf_ops.disable_all_vf2pf_interrupts = disable_all_vf2pf_interrupts;
        hw_data->pfvf_ops.disable_pending_vf2pf_interrupts = disable_pending_vf2pf_interrupts;
        adf_gen2_init_hw_csr_ops(&hw_data->csr_ops);
        adf_gen2_init_dc_ops(&hw_data->dc_ops);
}

void adf_clean_hw_data_dh895xcc(struct adf_hw_device_data *hw_data)
{
        hw_data->dev_class->instances--;
}
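/*
 * Usage sketch (illustrative): the dh895xcc PCI probe path allocates the
 * hw_data structure, fills it in with the init routine above, and undoes the
 * class instance accounting on teardown. Roughly:
 *
 *      hw_data = kzalloc(sizeof(*hw_data), GFP_KERNEL);
 *      accel_dev->hw_device = hw_data;
 *      adf_init_hw_data_dh895xcc(accel_dev->hw_device);
 *      ...
 *      adf_clean_hw_data_dh895xcc(accel_dev->hw_device);
 */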