Linux Audio

Check our new training course

In-person Linux kernel drivers training

Jun 16-20, 2025
Register
Loading...
Note: File does not exist in v3.1.
  1// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
  2/* Copyright(c) 2020 Intel Corporation */
  3#include <linux/iopoll.h>
  4#include "adf_accel_devices.h"
  5#include "adf_cfg_services.h"
  6#include "adf_common_drv.h"
  7#include "adf_gen4_hw_data.h"
  8#include "adf_gen4_pm.h"
  9
/* Pack a ring base DMA address and ring size into the GEN4 ring-base CSR format. */
static u64 build_csr_ring_base_addr(dma_addr_t addr, u32 size)
{
	return BUILD_RING_BASE_ADDR(addr, size);
}
 14
/* Read the head pointer CSR of @ring in @bank. */
static u32 read_csr_ring_head(void __iomem *csr_base_addr, u32 bank, u32 ring)
{
	return READ_CSR_RING_HEAD(csr_base_addr, bank, ring);
}
 19
/* Write @value to the head pointer CSR of @ring in @bank. */
static void write_csr_ring_head(void __iomem *csr_base_addr, u32 bank, u32 ring,
				u32 value)
{
	WRITE_CSR_RING_HEAD(csr_base_addr, bank, ring, value);
}
 25
/* Read the tail pointer CSR of @ring in @bank. */
static u32 read_csr_ring_tail(void __iomem *csr_base_addr, u32 bank, u32 ring)
{
	return READ_CSR_RING_TAIL(csr_base_addr, bank, ring);
}
 30
/* Write @value to the tail pointer CSR of @ring in @bank. */
static void write_csr_ring_tail(void __iomem *csr_base_addr, u32 bank, u32 ring,
				u32 value)
{
	WRITE_CSR_RING_TAIL(csr_base_addr, bank, ring, value);
}
 36
/* Read the ring empty-status CSR for @bank (one bit per ring). */
static u32 read_csr_e_stat(void __iomem *csr_base_addr, u32 bank)
{
	return READ_CSR_E_STAT(csr_base_addr, bank);
}
 41
/* Write the ring configuration CSR of @ring in @bank. */
static void write_csr_ring_config(void __iomem *csr_base_addr, u32 bank, u32 ring,
				  u32 value)
{
	WRITE_CSR_RING_CONFIG(csr_base_addr, bank, ring, value);
}
 47
/* Program the DMA base address of @ring in @bank. */
static void write_csr_ring_base(void __iomem *csr_base_addr, u32 bank, u32 ring,
				dma_addr_t addr)
{
	WRITE_CSR_RING_BASE(csr_base_addr, bank, ring, addr);
}
 53
/* Write the interrupt flag CSR for @bank. */
static void write_csr_int_flag(void __iomem *csr_base_addr, u32 bank,
			       u32 value)
{
	WRITE_CSR_INT_FLAG(csr_base_addr, bank, value);
}
 59
/* Program the interrupt source-select CSR for @bank with the default mask. */
static void write_csr_int_srcsel(void __iomem *csr_base_addr, u32 bank)
{
	WRITE_CSR_INT_SRCSEL(csr_base_addr, bank);
}
 64
/* Write the interrupt coalescing enable CSR for @bank. */
static void write_csr_int_col_en(void __iomem *csr_base_addr, u32 bank, u32 value)
{
	WRITE_CSR_INT_COL_EN(csr_base_addr, bank, value);
}
 69
/* Write the interrupt coalescing control CSR for @bank. */
static void write_csr_int_col_ctl(void __iomem *csr_base_addr, u32 bank,
				  u32 value)
{
	WRITE_CSR_INT_COL_CTL(csr_base_addr, bank, value);
}
 75
/* Write the combined interrupt flag-and-coalescing CSR for @bank. */
static void write_csr_int_flag_and_col(void __iomem *csr_base_addr, u32 bank,
				       u32 value)
{
	WRITE_CSR_INT_FLAG_AND_COL(csr_base_addr, bank, value);
}
 81
/* Write the ring service arbiter enable CSR for @bank. */
static void write_csr_ring_srv_arb_en(void __iomem *csr_base_addr, u32 bank,
				      u32 value)
{
	WRITE_CSR_RING_SRV_ARB_EN(csr_base_addr, bank, value);
}
 87
/*
 * adf_gen4_init_hw_csr_ops() - populate the transport CSR ops table
 * @csr_ops: ops structure to fill in
 *
 * Wires the GEN4-specific CSR accessor wrappers above into the generic
 * adf_hw_csr_ops vtable used by the common transport code.
 */
void adf_gen4_init_hw_csr_ops(struct adf_hw_csr_ops *csr_ops)
{
	csr_ops->build_csr_ring_base_addr = build_csr_ring_base_addr;
	csr_ops->read_csr_ring_head = read_csr_ring_head;
	csr_ops->write_csr_ring_head = write_csr_ring_head;
	csr_ops->read_csr_ring_tail = read_csr_ring_tail;
	csr_ops->write_csr_ring_tail = write_csr_ring_tail;
	csr_ops->read_csr_e_stat = read_csr_e_stat;
	csr_ops->write_csr_ring_config = write_csr_ring_config;
	csr_ops->write_csr_ring_base = write_csr_ring_base;
	csr_ops->write_csr_int_flag = write_csr_int_flag;
	csr_ops->write_csr_int_srcsel = write_csr_int_srcsel;
	csr_ops->write_csr_int_col_en = write_csr_int_col_en;
	csr_ops->write_csr_int_col_ctl = write_csr_int_col_ctl;
	csr_ops->write_csr_int_flag_and_col = write_csr_int_flag_and_col;
	csr_ops->write_csr_ring_srv_arb_en = write_csr_ring_srv_arb_en;
}
EXPORT_SYMBOL_GPL(adf_gen4_init_hw_csr_ops);
106
/* Return the fixed accelerator mask for GEN4 devices. */
u32 adf_gen4_get_accel_mask(struct adf_hw_device_data *self)
{
	return ADF_GEN4_ACCELERATORS_MASK;
}
EXPORT_SYMBOL_GPL(adf_gen4_get_accel_mask);
112
/* Return the fixed number of accelerators on GEN4 devices. */
u32 adf_gen4_get_num_accels(struct adf_hw_device_data *self)
{
	return ADF_GEN4_MAX_ACCELERATORS;
}
EXPORT_SYMBOL_GPL(adf_gen4_get_num_accels);
118
119u32 adf_gen4_get_num_aes(struct adf_hw_device_data *self)
120{
121	if (!self || !self->ae_mask)
122		return 0;
123
124	return hweight32(self->ae_mask);
125}
126EXPORT_SYMBOL_GPL(adf_gen4_get_num_aes);
127
/* Return the BAR index holding the PMISC (miscellaneous) registers. */
u32 adf_gen4_get_misc_bar_id(struct adf_hw_device_data *self)
{
	return ADF_GEN4_PMISC_BAR;
}
EXPORT_SYMBOL_GPL(adf_gen4_get_misc_bar_id);
133
/* Return the BAR index holding the ETR (transport ring) registers. */
u32 adf_gen4_get_etr_bar_id(struct adf_hw_device_data *self)
{
	return ADF_GEN4_ETR_BAR;
}
EXPORT_SYMBOL_GPL(adf_gen4_get_etr_bar_id);
139
/* Return the BAR index holding the SRAM region. */
u32 adf_gen4_get_sram_bar_id(struct adf_hw_device_data *self)
{
	return ADF_GEN4_SRAM_BAR;
}
EXPORT_SYMBOL_GPL(adf_gen4_get_sram_bar_id);
145
/* GEN4 devices report a single SKU. */
enum dev_sku_info adf_gen4_get_sku(struct adf_hw_device_data *self)
{
	return DEV_SKU_1;
}
EXPORT_SYMBOL_GPL(adf_gen4_get_sku);
151
/* Fill @arb_info with the GEN4 arbiter configuration and CSR offsets. */
void adf_gen4_get_arb_info(struct arb_info *arb_info)
{
	arb_info->arb_cfg = ADF_GEN4_ARB_CONFIG;
	arb_info->arb_offset = ADF_GEN4_ARB_OFFSET;
	arb_info->wt2sam_offset = ADF_GEN4_ARB_WRK_2_SER_MAP_OFFSET;
}
EXPORT_SYMBOL_GPL(adf_gen4_get_arb_info);
159
/* Fill @admin_csrs_info with the GEN4 admin interface CSR offsets. */
void adf_gen4_get_admin_info(struct admin_info *admin_csrs_info)
{
	admin_csrs_info->mailbox_offset = ADF_GEN4_MAILBOX_BASE_OFFSET;
	admin_csrs_info->admin_msg_ur = ADF_GEN4_ADMINMSGUR_OFFSET;
	admin_csrs_info->admin_msg_lr = ADF_GEN4_ADMINMSGLR_OFFSET;
}
EXPORT_SYMBOL_GPL(adf_gen4_get_admin_info);
167
/* Return the clock frequency used for the heartbeat counters. */
u32 adf_gen4_get_heartbeat_clock(struct adf_hw_device_data *self)
{
	/*
	 * GEN4 uses KPT counter for HB
	 */
	return ADF_GEN4_KPT_COUNTER_FREQ;
}
EXPORT_SYMBOL_GPL(adf_gen4_get_heartbeat_clock);
176
/*
 * adf_gen4_enable_error_correction() - configure error reporting masks
 * @accel_dev: accelerator device
 *
 * Writes ERRMSK3 so that only the VF-to-PF (VFLR) notification source stays
 * masked on the host; all other errsou3 sources are left unmasked.
 */
void adf_gen4_enable_error_correction(struct adf_accel_dev *accel_dev)
{
	struct adf_bar *misc_bar = &GET_BARS(accel_dev)[ADF_GEN4_PMISC_BAR];
	void __iomem *csr = misc_bar->virt_addr;

	/* Enable all in errsou3 except VFLR notification on host */
	ADF_CSR_WR(csr, ADF_GEN4_ERRMSK3, ADF_GEN4_VFLNOTIFY);
}
EXPORT_SYMBOL_GPL(adf_gen4_enable_error_correction);
186
/*
 * adf_gen4_enable_ints() - unmask device interrupts
 * @accel_dev: accelerator device
 *
 * Clears the SMIA PF mask registers (writing 0 unmasks) for both the
 * bundle (ring-pair) interrupt banks and the miscellaneous interrupt.
 */
void adf_gen4_enable_ints(struct adf_accel_dev *accel_dev)
{
	void __iomem *addr;

	addr = (&GET_BARS(accel_dev)[ADF_GEN4_PMISC_BAR])->virt_addr;

	/* Enable bundle interrupts */
	ADF_CSR_WR(addr, ADF_GEN4_SMIAPF_RP_X0_MASK_OFFSET, 0);
	ADF_CSR_WR(addr, ADF_GEN4_SMIAPF_RP_X1_MASK_OFFSET, 0);

	/* Enable misc interrupts */
	ADF_CSR_WR(addr, ADF_GEN4_SMIAPF_MASK_OFFSET, 0);
}
EXPORT_SYMBOL_GPL(adf_gen4_enable_ints);
201
/*
 * adf_gen4_init_device() - power up a GEN4 device
 * @accel_dev: accelerator device
 *
 * Masks the power-management interrupt, asserts DRV_ACTIVE, then polls the
 * PM status register until the device reports the init state.
 *
 * Return: 0 on success, -ETIMEDOUT (from read_poll_timeout) if the device
 * does not power up in time.
 */
int adf_gen4_init_device(struct adf_accel_dev *accel_dev)
{
	void __iomem *addr;
	u32 status;
	u32 csr;
	int ret;

	addr = (&GET_BARS(accel_dev)[ADF_GEN4_PMISC_BAR])->virt_addr;

	/* Temporarily mask PM interrupt */
	csr = ADF_CSR_RD(addr, ADF_GEN4_ERRMSK2);
	csr |= ADF_GEN4_PM_SOU;
	ADF_CSR_WR(addr, ADF_GEN4_ERRMSK2, csr);

	/* Set DRV_ACTIVE bit to power up the device */
	ADF_CSR_WR(addr, ADF_GEN4_PM_INTERRUPT, ADF_GEN4_PM_DRV_ACTIVE);

	/* Poll status register to make sure the device is powered up */
	ret = read_poll_timeout(ADF_CSR_RD, status,
				status & ADF_GEN4_PM_INIT_STATE,
				ADF_GEN4_PM_POLL_DELAY_US,
				ADF_GEN4_PM_POLL_TIMEOUT_US, true, addr,
				ADF_GEN4_PM_STATUS);
	if (ret)
		dev_err(&GET_DEV(accel_dev), "Failed to power up the device\n");

	return ret;
}
EXPORT_SYMBOL_GPL(adf_gen4_init_device);
231
232static inline void adf_gen4_unpack_ssm_wdtimer(u64 value, u32 *upper,
233					       u32 *lower)
234{
235	*lower = lower_32_bits(value);
236	*upper = upper_32_bits(value);
237}
238
/*
 * adf_gen4_set_ssm_wdtimer() - program the SSM watchdog timers
 * @accel_dev: accelerator device
 *
 * Splits the default 64-bit watchdog values into high/low halves and writes
 * them to the sym/dc watchdog CSR pair and the pke watchdog CSR pair.
 */
void adf_gen4_set_ssm_wdtimer(struct adf_accel_dev *accel_dev)
{
	void __iomem *pmisc_addr = adf_get_pmisc_base(accel_dev);
	u64 timer_val_pke = ADF_SSM_WDT_PKE_DEFAULT_VALUE;
	u64 timer_val = ADF_SSM_WDT_DEFAULT_VALUE;
	u32 ssm_wdt_pke_high = 0;
	u32 ssm_wdt_pke_low = 0;
	u32 ssm_wdt_high = 0;
	u32 ssm_wdt_low = 0;

	/* Convert 64bit WDT timer value into 32bit values for
	 * mmio write to 32bit CSRs.
	 */
	adf_gen4_unpack_ssm_wdtimer(timer_val, &ssm_wdt_high, &ssm_wdt_low);
	adf_gen4_unpack_ssm_wdtimer(timer_val_pke, &ssm_wdt_pke_high,
				    &ssm_wdt_pke_low);

	/* Enable WDT for sym and dc */
	ADF_CSR_WR(pmisc_addr, ADF_SSMWDTL_OFFSET, ssm_wdt_low);
	ADF_CSR_WR(pmisc_addr, ADF_SSMWDTH_OFFSET, ssm_wdt_high);
	/* Enable WDT for pke */
	ADF_CSR_WR(pmisc_addr, ADF_SSMWDTPKEL_OFFSET, ssm_wdt_pke_low);
	ADF_CSR_WR(pmisc_addr, ADF_SSMWDTPKEH_OFFSET, ssm_wdt_pke_high);
}
EXPORT_SYMBOL_GPL(adf_gen4_set_ssm_wdtimer);
264
265/*
266 * The vector routing table is used to select the MSI-X entry to use for each
267 * interrupt source.
268 * The first ADF_GEN4_ETR_MAX_BANKS entries correspond to ring interrupts.
269 * The final entry corresponds to VF2PF or error interrupts.
270 * This vector table could be used to configure one MSI-X entry to be shared
271 * between multiple interrupt sources.
272 *
273 * The default routing is set to have a one to one correspondence between the
274 * interrupt source and the MSI-X entry used.
275 */
void adf_gen4_set_msix_default_rttable(struct adf_accel_dev *accel_dev)
{
	void __iomem *csr;
	int i;

	csr = (&GET_BARS(accel_dev)[ADF_GEN4_PMISC_BAR])->virt_addr;
	/*
	 * <= is intentional: entries 0..MAX_BANKS-1 route ring interrupts,
	 * entry MAX_BANKS routes the VF2PF/error interrupt source.
	 */
	for (i = 0; i <= ADF_GEN4_ETR_MAX_BANKS; i++)
		ADF_CSR_WR(csr, ADF_GEN4_MSIX_RTTABLE_OFFSET(i), i);
}
EXPORT_SYMBOL_GPL(adf_gen4_set_msix_default_rttable);
286
/* Stub PF/VF comms hook: always reports success (comms disabled). */
int adf_pfvf_comms_disabled(struct adf_accel_dev *accel_dev)
{
	return 0;
}
EXPORT_SYMBOL_GPL(adf_pfvf_comms_disabled);
292
/*
 * reset_ring_pair() - trigger and wait for a ring-pair reset
 * @csr: ETR BAR base address
 * @bank_number: ring pair (bank) to reset
 *
 * Return: 0 on success, negative errno (timeout) if the reset does not
 * complete within ADF_RPRESET_POLL_TIMEOUT_US.
 */
static int reset_ring_pair(void __iomem *csr, u32 bank_number)
{
	u32 status;
	int ret;

	/* Write rpresetctl register BIT(0) as 1
	 * Since rpresetctl registers have no RW fields, no need to preserve
	 * values for other bits. Just write directly.
	 */
	ADF_CSR_WR(csr, ADF_WQM_CSR_RPRESETCTL(bank_number),
		   ADF_WQM_CSR_RPRESETCTL_RESET);

	/* Read rpresetsts register and wait for rp reset to complete */
	ret = read_poll_timeout(ADF_CSR_RD, status,
				status & ADF_WQM_CSR_RPRESETSTS_STATUS,
				ADF_RPRESET_POLL_DELAY_US,
				ADF_RPRESET_POLL_TIMEOUT_US, true,
				csr, ADF_WQM_CSR_RPRESETSTS(bank_number));
	if (!ret) {
		/* When rp reset is done, clear rpresetsts */
		ADF_CSR_WR(csr, ADF_WQM_CSR_RPRESETSTS(bank_number),
			   ADF_WQM_CSR_RPRESETSTS_STATUS);
	}

	return ret;
}
319
/*
 * adf_gen4_ring_pair_reset() - reset one ring pair of the device
 * @accel_dev: accelerator device
 * @bank_number: ring pair (bank) to reset
 *
 * Return: 0 on success, -EINVAL for an out-of-range bank, or a negative
 * errno if the hardware reset times out.
 */
int adf_gen4_ring_pair_reset(struct adf_accel_dev *accel_dev, u32 bank_number)
{
	struct adf_hw_device_data *hw_data = accel_dev->hw_device;
	u32 etr_bar_id = hw_data->get_etr_bar_id(hw_data);
	void __iomem *csr;
	int ret;

	if (bank_number >= hw_data->num_banks)
		return -EINVAL;

	dev_dbg(&GET_DEV(accel_dev),
		"ring pair reset for bank:%d\n", bank_number);

	csr = (&GET_BARS(accel_dev)[etr_bar_id])->virt_addr;
	ret = reset_ring_pair(csr, bank_number);
	if (ret)
		dev_err(&GET_DEV(accel_dev),
			"ring pair reset failed (timeout)\n");
	else
		dev_dbg(&GET_DEV(accel_dev), "ring pair reset successful\n");

	return ret;
}
EXPORT_SYMBOL_GPL(adf_gen4_ring_pair_reset);
344
/*
 * Fixed thread-to-arbiter map used when the device is configured for the
 * DCC (data compression chaining) service: only the second group of four
 * engines is mapped to the arbiters; all other entries are zero.
 */
static const u32 thrd_to_arb_map_dcc[] = {
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x0000FFFF, 0x0000FFFF, 0x0000FFFF, 0x0000FFFF,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x0
};
352
/* Per-ring-pair-group arbiter masks: group 0 -> arbiters 0/2, group 1 -> 1/3. */
static const u16 rp_group_to_arb_mask[] = {
	[RP_GROUP_0] = 0x5,
	[RP_GROUP_1] = 0xA,
};
357
358static bool is_single_service(int service_id)
359{
360	switch (service_id) {
361	case SVC_DC:
362	case SVC_SYM:
363	case SVC_ASYM:
364		return true;
365	case SVC_CY:
366	case SVC_CY2:
367	case SVC_DCC:
368	case SVC_ASYM_DC:
369	case SVC_DC_ASYM:
370	case SVC_SYM_DC:
371	case SVC_DC_SYM:
372	default:
373		return false;
374	}
375}
376
/*
 * adf_gen4_init_thd2arb_map() - build the thread-to-arbiter mapping table
 * @accel_dev: accelerator device
 *
 * Fills hw_data->thd_to_arb_map, one u32 per acceleration engine, where each
 * 4-bit nibble selects the arbiters visible to the corresponding AE thread.
 * For the DCC configuration a fixed table is copied instead. For single
 * services every enabled thread sees both ring-pair groups; for combined
 * services each worker object's threads see only its own group's arbiters.
 *
 * Return: 0 on success, -EFAULT if a required hw_data callback is missing,
 * -EINVAL for an invalid group or thread mask, or a negative errno from
 * adf_get_service_enabled().
 */
int adf_gen4_init_thd2arb_map(struct adf_accel_dev *accel_dev)
{
	struct adf_hw_device_data *hw_data = GET_HW_DATA(accel_dev);
	u32 *thd2arb_map = hw_data->thd_to_arb_map;
	unsigned int ae_cnt, worker_obj_cnt, i, j;
	unsigned long ae_mask, thds_mask;
	int srv_id, rp_group;
	u32 thd2arb_map_base;
	u16 arb_mask;

	/* All callbacks used below must be provided by the device data */
	if (!hw_data->get_rp_group || !hw_data->get_ena_thd_mask ||
	    !hw_data->get_num_aes || !hw_data->uof_get_num_objs ||
	    !hw_data->uof_get_ae_mask)
		return -EFAULT;

	srv_id = adf_get_service_enabled(accel_dev);
	if (srv_id < 0)
		return srv_id;

	ae_cnt = hw_data->get_num_aes(hw_data);
	/* Admin AEs are not workers and get no arbiter mapping */
	worker_obj_cnt = hw_data->uof_get_num_objs(accel_dev) -
			 ADF_GEN4_ADMIN_ACCELENGINES;

	if (srv_id == SVC_DCC) {
		/* DCC uses a fixed precomputed map */
		memcpy(thd2arb_map, thrd_to_arb_map_dcc,
		       array_size(sizeof(*thd2arb_map), ae_cnt));
		return 0;
	}

	for (i = 0; i < worker_obj_cnt; i++) {
		ae_mask = hw_data->uof_get_ae_mask(accel_dev, i);
		rp_group = hw_data->get_rp_group(accel_dev, ae_mask);
		thds_mask = hw_data->get_ena_thd_mask(accel_dev, i);
		thd2arb_map_base = 0;

		if (rp_group >= RP_GROUP_COUNT || rp_group < RP_GROUP_0)
			return -EINVAL;

		if (thds_mask == ADF_GEN4_ENA_THD_MASK_ERROR)
			return -EINVAL;

		/* Single services may use both groups; combined only their own */
		if (is_single_service(srv_id))
			arb_mask = rp_group_to_arb_mask[RP_GROUP_0] |
				   rp_group_to_arb_mask[RP_GROUP_1];
		else
			arb_mask = rp_group_to_arb_mask[rp_group];

		/* One 4-bit arbiter mask per enabled thread */
		for_each_set_bit(j, &thds_mask, ADF_NUM_THREADS_PER_AE)
			thd2arb_map_base |= arb_mask << (j * 4);

		/* Apply the same per-thread map to every AE of this object */
		for_each_set_bit(j, &ae_mask, ae_cnt)
			thd2arb_map[j] = thd2arb_map_base;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(adf_gen4_init_thd2arb_map);