// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2023 Intel Corporation. */
#define dev_fmt(fmt) "Telemetry: " fmt

#include <asm/errno.h>
#include <linux/atomic.h>
#include <linux/device.h>
#include <linux/dev_printk.h>
#include <linux/dma-mapping.h>
#include <linux/jiffies.h>
#include <linux/kernel.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/workqueue.h>

#include "adf_admin.h"
#include "adf_accel_devices.h"
#include "adf_common_drv.h"
#include "adf_telemetry.h"

#define TL_IS_ZERO(input)	((input) == 0)

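/* Telemetry is available only if the loaded firmware reports the capability bit. */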
static bool is_tl_supported(struct adf_accel_dev *accel_dev)
{
	u16 fw_caps = GET_HW_DATA(accel_dev)->fw_capabilities;

	return fw_caps & TL_CAPABILITY_BIT;
}

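/*
 * Reject a device whose telemetry layout is missing the expected device,
 * slice utilization, slice execution or ring pair counter descriptions.
 */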
static int validate_tl_data(struct adf_tl_hw_data *tl_data)
{
	if (!tl_data->dev_counters ||
	    TL_IS_ZERO(tl_data->num_dev_counters) ||
	    !tl_data->sl_util_counters ||
	    !tl_data->sl_exec_counters ||
	    !tl_data->rp_counters ||
	    TL_IS_ZERO(tl_data->num_rp_counters))
		return -EOPNOTSUPP;

	return 0;
}

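/*
 * Allocate the telemetry context: the ring pair index table, the history
 * snapshot buffers and the DMA-coherent region that adf_tl_run() hands to
 * the firmware.
 */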
static int adf_tl_alloc_mem(struct adf_accel_dev *accel_dev)
{
	struct adf_tl_hw_data *tl_data = &GET_TL_DATA(accel_dev);
	struct device *dev = &GET_DEV(accel_dev);
	size_t regs_sz = tl_data->layout_sz;
	struct adf_telemetry *telemetry;
	int node = dev_to_node(dev);
	void *tl_data_regs;
	unsigned int i;

	telemetry = kzalloc_node(sizeof(*telemetry), GFP_KERNEL, node);
	if (!telemetry)
		return -ENOMEM;

	telemetry->rp_num_indexes = kmalloc_array(tl_data->max_rp,
						  sizeof(*telemetry->rp_num_indexes),
						  GFP_KERNEL);
	if (!telemetry->rp_num_indexes)
		goto err_free_tl;

	telemetry->regs_hist_buff = kmalloc_array(tl_data->num_hbuff,
						  sizeof(*telemetry->regs_hist_buff),
						  GFP_KERNEL);
	if (!telemetry->regs_hist_buff)
		goto err_free_rp_indexes;

	telemetry->regs_data = dma_alloc_coherent(dev, regs_sz,
						  &telemetry->regs_data_p,
						  GFP_KERNEL);
	if (!telemetry->regs_data)
		goto err_free_regs_hist_buff;

	for (i = 0; i < tl_data->num_hbuff; i++) {
		tl_data_regs = kzalloc_node(regs_sz, GFP_KERNEL, node);
		if (!tl_data_regs)
			goto err_free_dma;

		telemetry->regs_hist_buff[i] = tl_data_regs;
	}

	accel_dev->telemetry = telemetry;

	return 0;

err_free_dma:
	dma_free_coherent(dev, regs_sz, telemetry->regs_data,
			  telemetry->regs_data_p);

	while (i--)
		kfree(telemetry->regs_hist_buff[i]);

err_free_regs_hist_buff:
	kfree(telemetry->regs_hist_buff);
err_free_rp_indexes:
	kfree(telemetry->rp_num_indexes);
err_free_tl:
	kfree(telemetry);

	return -ENOMEM;
}

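/* Free everything allocated by adf_tl_alloc_mem(). */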
static void adf_tl_free_mem(struct adf_accel_dev *accel_dev)
{
	struct adf_tl_hw_data *tl_data = &GET_TL_DATA(accel_dev);
	struct adf_telemetry *telemetry = accel_dev->telemetry;
	struct device *dev = &GET_DEV(accel_dev);
	size_t regs_sz = tl_data->layout_sz;
	unsigned int i;

	for (i = 0; i < tl_data->num_hbuff; i++)
		kfree(telemetry->regs_hist_buff[i]);

	dma_free_coherent(dev, regs_sz, telemetry->regs_data,
			  telemetry->regs_data_p);

	kfree(telemetry->regs_hist_buff);
	kfree(telemetry->rp_num_indexes);
	kfree(telemetry);
	accel_dev->telemetry = NULL;
}

static unsigned long get_next_timeout(void)
{
	return msecs_to_jiffies(ADF_TL_TIMER_INT_MS);
}

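/* Copy the live telemetry region into the current history buffer slot. */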
static void snapshot_regs(struct adf_telemetry *telemetry, size_t size)
{
	void *dst = telemetry->regs_hist_buff[telemetry->hb_num];
	void *src = telemetry->regs_data;

	memcpy(dst, src, size);
}

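/*
 * Periodic worker: when the message counter in the telemetry region has
 * advanced, snapshot the region into the history ring, re-reading once if
 * the counter moved during the copy, then re-arm the timer.
 */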
static void tl_work_handler(struct work_struct *work)
{
	struct delayed_work *delayed_work;
	struct adf_telemetry *telemetry;
	struct adf_tl_hw_data *tl_data;
	u32 msg_cnt, old_msg_cnt;
	size_t layout_sz;
	u32 *regs_data;
	size_t id;

	delayed_work = to_delayed_work(work);
	telemetry = container_of(delayed_work, struct adf_telemetry, work_ctx);
	tl_data = &GET_TL_DATA(telemetry->accel_dev);
	regs_data = telemetry->regs_data;

	id = tl_data->msg_cnt_off / sizeof(*regs_data);
	layout_sz = tl_data->layout_sz;

	if (!atomic_read(&telemetry->state)) {
		cancel_delayed_work_sync(&telemetry->work_ctx);
		return;
	}

	msg_cnt = regs_data[id];
	old_msg_cnt = msg_cnt;
	if (msg_cnt == telemetry->msg_cnt)
		goto out;

	mutex_lock(&telemetry->regs_hist_lock);

	snapshot_regs(telemetry, layout_sz);

	/* Check if data changed while updating it */
	msg_cnt = regs_data[id];
	if (old_msg_cnt != msg_cnt)
		snapshot_regs(telemetry, layout_sz);

	telemetry->msg_cnt = msg_cnt;
	telemetry->hb_num++;
	telemetry->hb_num %= telemetry->hbuffs;

	mutex_unlock(&telemetry->regs_hist_lock);

out:
	adf_misc_wq_queue_delayed_work(&telemetry->work_ctx, get_next_timeout());
}

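/* Stop the periodic worker and request that the firmware stop collection. */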
int adf_tl_halt(struct adf_accel_dev *accel_dev)
{
	struct adf_telemetry *telemetry = accel_dev->telemetry;
	struct device *dev = &GET_DEV(accel_dev);
	int ret;

	cancel_delayed_work_sync(&telemetry->work_ctx);
	atomic_set(&telemetry->state, 0);

	ret = adf_send_admin_tl_stop(accel_dev);
	if (ret)
		dev_err(dev, "failed to stop telemetry\n");

	return ret;
}

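/*
 * Hand the DMA region and ring pair selection to the firmware and start the
 * periodic worker. @state doubles as the running flag and the number of
 * history buffers to rotate through.
 */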
int adf_tl_run(struct adf_accel_dev *accel_dev, int state)
{
	struct adf_tl_hw_data *tl_data = &GET_TL_DATA(accel_dev);
	struct adf_telemetry *telemetry = accel_dev->telemetry;
	struct device *dev = &GET_DEV(accel_dev);
	size_t layout_sz = tl_data->layout_sz;
	int ret;

	ret = adf_send_admin_tl_start(accel_dev, telemetry->regs_data_p,
				      layout_sz, telemetry->rp_num_indexes,
				      &telemetry->slice_cnt);
	if (ret) {
		dev_err(dev, "failed to start telemetry\n");
		return ret;
	}

	telemetry->hbuffs = state;
	atomic_set(&telemetry->state, state);

	adf_misc_wq_queue_delayed_work(&telemetry->work_ctx, get_next_timeout());

	return 0;
}

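/* Validate the device telemetry layout and set up the telemetry context. */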
int adf_tl_init(struct adf_accel_dev *accel_dev)
{
	struct adf_tl_hw_data *tl_data = &GET_TL_DATA(accel_dev);
	u8 max_rp = GET_TL_DATA(accel_dev).max_rp;
	struct device *dev = &GET_DEV(accel_dev);
	struct adf_telemetry *telemetry;
	unsigned int i;
	int ret;

	ret = validate_tl_data(tl_data);
	if (ret)
		return ret;

	ret = adf_tl_alloc_mem(accel_dev);
	if (ret) {
		dev_err(dev, "failed to initialize: %d\n", ret);
		return ret;
	}

	telemetry = accel_dev->telemetry;
	telemetry->accel_dev = accel_dev;

	mutex_init(&telemetry->wr_lock);
	mutex_init(&telemetry->regs_hist_lock);
	INIT_DELAYED_WORK(&telemetry->work_ctx, tl_work_handler);

	for (i = 0; i < max_rp; i++)
		telemetry->rp_num_indexes[i] = ADF_TL_RP_REGS_DISABLED;

	return 0;
}

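/* Release the telemetry context if the loaded firmware does not support it. */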
int adf_tl_start(struct adf_accel_dev *accel_dev)
{
	struct device *dev = &GET_DEV(accel_dev);

	if (!accel_dev->telemetry)
		return -EOPNOTSUPP;

	if (!is_tl_supported(accel_dev)) {
		dev_info(dev, "feature not supported by FW\n");
		adf_tl_free_mem(accel_dev);
		return -EOPNOTSUPP;
	}

	return 0;
}

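/* Halt telemetry if it is currently running. */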
void adf_tl_stop(struct adf_accel_dev *accel_dev)
{
	if (!accel_dev->telemetry)
		return;

	if (atomic_read(&accel_dev->telemetry->state))
		adf_tl_halt(accel_dev);
}

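/* Tear down the telemetry context, if one was allocated. */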
void adf_tl_shutdown(struct adf_accel_dev *accel_dev)
{
	if (!accel_dev->telemetry)
		return;

	adf_tl_free_mem(accel_dev);
}