Linux Audio

Check our new training course

Loading...
Note: File does not exist in v3.1.
  1// SPDX-License-Identifier: GPL-2.0
  2/*
  3 * System Control and Management Interface (SCMI) Performance Protocol
  4 *
  5 * Copyright (C) 2018 ARM Ltd.
  6 */
  7
  8#include <linux/bits.h>
  9#include <linux/of.h>
 10#include <linux/io.h>
 11#include <linux/io-64-nonatomic-hi-lo.h>
 12#include <linux/platform_device.h>
 13#include <linux/pm_opp.h>
 14#include <linux/sort.h>
 15
 16#include "common.h"
 17
/*
 * Performance protocol message IDs implemented by this driver.
 * IDs 0x0-0x2 are the generic PROTOCOL_* commands handled in common code.
 */
enum scmi_performance_protocol_cmd {
	PERF_DOMAIN_ATTRIBUTES = 0x3,
	PERF_DESCRIBE_LEVELS = 0x4,
	PERF_LIMITS_SET = 0x5,
	PERF_LIMITS_GET = 0x6,
	PERF_LEVEL_SET = 0x7,
	PERF_LEVEL_GET = 0x8,
	PERF_NOTIFY_LIMITS = 0x9,
	PERF_NOTIFY_LEVEL = 0xa,
	PERF_DESCRIBE_FASTCHANNEL = 0xb,
};
 29
/*
 * struct scmi_opp - one operating performance point of a domain
 * @perf: abstract performance level index assigned by the platform
 * @power: power cost reported by firmware for this level
 * @trans_latency_us: transition latency to this level, in microseconds
 */
struct scmi_opp {
	u32 perf;
	u32 power;
	u32 trans_latency_us;
};
 35
/* Response layout of PROTOCOL_ATTRIBUTES for the Performance protocol. */
struct scmi_msg_resp_perf_attributes {
	__le16 num_domains;
	__le16 flags;
/* Bit 0 set: power values are in mW, otherwise in an abstract scale */
#define POWER_SCALE_IN_MILLIWATT(x)	((x) & BIT(0))
	__le32 stats_addr_low;
	__le32 stats_addr_high;
	__le32 stats_size;
};
 44
 45struct scmi_msg_resp_perf_domain_attributes {
 46	__le32 flags;
 47#define SUPPORTS_SET_LIMITS(x)		((x) & BIT(31))
 48#define SUPPORTS_SET_PERF_LVL(x)	((x) & BIT(30))
 49#define SUPPORTS_PERF_LIMIT_NOTIFY(x)	((x) & BIT(29))
 50#define SUPPORTS_PERF_LEVEL_NOTIFY(x)	((x) & BIT(28))
 51#define SUPPORTS_PERF_FASTCHANNELS(x)	((x) & BIT(27))
 52	__le32 rate_limit_us;
 53	__le32 sustained_freq_khz;
 54	__le32 sustained_perf_level;
 55	    u8 name[SCMI_MAX_STR_SIZE];
 56};
 57
/*
 * Request payload of PERF_DESCRIBE_LEVELS; @level_index is the number of
 * OPP entries to skip when paging through a long list.
 */
struct scmi_msg_perf_describe_levels {
	__le32 domain;
	__le32 level_index;
};
 62
/* Request payload of PERF_LIMITS_SET. */
struct scmi_perf_set_limits {
	__le32 domain;
	__le32 max_level;
	__le32 min_level;
};
 68
/* Response payload of PERF_LIMITS_GET. */
struct scmi_perf_get_limits {
	__le32 max_level;
	__le32 min_level;
};
 73
/* Request payload of PERF_LEVEL_SET. */
struct scmi_perf_set_level {
	__le32 domain;
	__le32 level;
};
 78
/* Request payload of PERF_NOTIFY_LIMITS / PERF_NOTIFY_LEVEL. */
struct scmi_perf_notify_level_or_limits {
	__le32 domain;
	__le32 notify_enable;
};
 83
 84struct scmi_msg_resp_perf_describe_levels {
 85	__le16 num_returned;
 86	__le16 num_remaining;
 87	struct {
 88		__le32 perf_val;
 89		__le32 power;
 90		__le16 transition_latency_us;
 91		__le16 reserved;
 92	} opp[0];
 93};
 94
/* Request payload of PERF_DESCRIBE_FASTCHANNEL. */
struct scmi_perf_get_fc_info {
	__le32 domain;
	__le32 message_id;
};
 99
/* Response layout of PERF_DESCRIBE_FASTCHANNEL. */
struct scmi_msg_resp_perf_desc_fc {
	__le32 attr;
#define SUPPORTS_DOORBELL(x)		((x) & BIT(0))
/* Doorbell register width encoded as log2(bytes): 0..3 -> 1..8 bytes */
#define DOORBELL_REG_WIDTH(x)		FIELD_GET(GENMASK(2, 1), (x))
	__le32 rate_limit;
	__le32 chan_addr_low;
	__le32 chan_addr_high;
	__le32 chan_size;
	__le32 db_addr_low;
	__le32 db_addr_high;
	__le32 db_set_lmask;
	__le32 db_set_hmask;
	__le32 db_preserve_lmask;
	__le32 db_preserve_hmask;
};
115
/*
 * struct scmi_fc_db_info - fast-channel doorbell descriptor
 * @width: register width in bytes (1, 2, 4 or 8)
 * @set: bits to set when ringing the doorbell
 * @mask: bits to preserve from the current register value
 * @addr: mapped doorbell register
 */
struct scmi_fc_db_info {
	int width;
	u64 set;
	u64 mask;
	void __iomem *addr;
};
122
/*
 * Per-domain fast-channel mappings; any pointer may be NULL when the
 * platform did not describe a fast channel for that operation.
 */
struct scmi_fc_info {
	void __iomem *level_set_addr;
	void __iomem *limit_set_addr;
	void __iomem *level_get_addr;
	void __iomem *limit_get_addr;
	struct scmi_fc_db_info *level_set_db;
	struct scmi_fc_db_info *limit_set_db;
};
131
/*
 * struct perf_dom_info - cached attributes of one performance domain
 * @mult_factor: multiplier converting an abstract perf level to Hz
 * @opp: OPP table, sorted ascending by perf level; @opp_count valid entries
 * @fc_info: fast-channel info, NULL when not supported/initialized
 */
struct perf_dom_info {
	bool set_limits;
	bool set_perf;
	bool perf_limit_notify;
	bool perf_level_notify;
	bool perf_fastchannels;
	u32 opp_count;
	u32 sustained_freq_khz;
	u32 sustained_perf_level;
	u32 mult_factor;
	char name[SCMI_MAX_STR_SIZE];
	struct scmi_opp opp[MAX_OPPS];
	struct scmi_fc_info *fc_info;
};
146
/*
 * struct scmi_perf_info - protocol-wide state stored in handle->perf_priv
 * @dom_info: array of @num_domains per-domain entries
 */
struct scmi_perf_info {
	int num_domains;
	bool power_scale_mw;
	u64 stats_addr;
	u32 stats_size;
	struct perf_dom_info *dom_info;
};
154
155static int scmi_perf_attributes_get(const struct scmi_handle *handle,
156				    struct scmi_perf_info *pi)
157{
158	int ret;
159	struct scmi_xfer *t;
160	struct scmi_msg_resp_perf_attributes *attr;
161
162	ret = scmi_xfer_get_init(handle, PROTOCOL_ATTRIBUTES,
163				 SCMI_PROTOCOL_PERF, 0, sizeof(*attr), &t);
164	if (ret)
165		return ret;
166
167	attr = t->rx.buf;
168
169	ret = scmi_do_xfer(handle, t);
170	if (!ret) {
171		u16 flags = le16_to_cpu(attr->flags);
172
173		pi->num_domains = le16_to_cpu(attr->num_domains);
174		pi->power_scale_mw = POWER_SCALE_IN_MILLIWATT(flags);
175		pi->stats_addr = le32_to_cpu(attr->stats_addr_low) |
176				(u64)le32_to_cpu(attr->stats_addr_high) << 32;
177		pi->stats_size = le32_to_cpu(attr->stats_size);
178	}
179
180	scmi_xfer_put(handle, t);
181	return ret;
182}
183
184static int
185scmi_perf_domain_attributes_get(const struct scmi_handle *handle, u32 domain,
186				struct perf_dom_info *dom_info)
187{
188	int ret;
189	struct scmi_xfer *t;
190	struct scmi_msg_resp_perf_domain_attributes *attr;
191
192	ret = scmi_xfer_get_init(handle, PERF_DOMAIN_ATTRIBUTES,
193				 SCMI_PROTOCOL_PERF, sizeof(domain),
194				 sizeof(*attr), &t);
195	if (ret)
196		return ret;
197
198	put_unaligned_le32(domain, t->tx.buf);
199	attr = t->rx.buf;
200
201	ret = scmi_do_xfer(handle, t);
202	if (!ret) {
203		u32 flags = le32_to_cpu(attr->flags);
204
205		dom_info->set_limits = SUPPORTS_SET_LIMITS(flags);
206		dom_info->set_perf = SUPPORTS_SET_PERF_LVL(flags);
207		dom_info->perf_limit_notify = SUPPORTS_PERF_LIMIT_NOTIFY(flags);
208		dom_info->perf_level_notify = SUPPORTS_PERF_LEVEL_NOTIFY(flags);
209		dom_info->perf_fastchannels = SUPPORTS_PERF_FASTCHANNELS(flags);
210		dom_info->sustained_freq_khz =
211					le32_to_cpu(attr->sustained_freq_khz);
212		dom_info->sustained_perf_level =
213					le32_to_cpu(attr->sustained_perf_level);
214		if (!dom_info->sustained_freq_khz ||
215		    !dom_info->sustained_perf_level)
216			/* CPUFreq converts to kHz, hence default 1000 */
217			dom_info->mult_factor =	1000;
218		else
219			dom_info->mult_factor =
220					(dom_info->sustained_freq_khz * 1000) /
221					dom_info->sustained_perf_level;
222		strlcpy(dom_info->name, attr->name, SCMI_MAX_STR_SIZE);
223	}
224
225	scmi_xfer_put(handle, t);
226	return ret;
227}
228
229static int opp_cmp_func(const void *opp1, const void *opp2)
230{
231	const struct scmi_opp *t1 = opp1, *t2 = opp2;
232
233	return t1->perf - t2->perf;
234}
235
/*
 * Enumerate every OPP of @domain via PERF_DESCRIBE_LEVELS, paging
 * through the platform's list with level_index, then sort the resulting
 * table by ascending perf level.
 *
 * Return: 0 on success, negative transport error otherwise. A list
 * longer than MAX_OPPS is truncated (with an error logged) but still
 * reported as success.
 */
static int
scmi_perf_describe_levels_get(const struct scmi_handle *handle, u32 domain,
			      struct perf_dom_info *perf_dom)
{
	int ret, cnt;
	u32 tot_opp_cnt = 0;
	u16 num_returned, num_remaining;
	struct scmi_xfer *t;
	struct scmi_opp *opp;
	struct scmi_msg_perf_describe_levels *dom_info;
	struct scmi_msg_resp_perf_describe_levels *level_info;

	ret = scmi_xfer_get_init(handle, PERF_DESCRIBE_LEVELS,
				 SCMI_PROTOCOL_PERF, sizeof(*dom_info), 0, &t);
	if (ret)
		return ret;

	dom_info = t->tx.buf;
	level_info = t->rx.buf;

	/* One xfer is reused for every page of the OPP list. */
	do {
		dom_info->domain = cpu_to_le32(domain);
		/* Set the number of OPPs to be skipped/already read */
		dom_info->level_index = cpu_to_le32(tot_opp_cnt);

		ret = scmi_do_xfer(handle, t);
		if (ret)
			break;

		num_returned = le16_to_cpu(level_info->num_returned);
		num_remaining = le16_to_cpu(level_info->num_remaining);
		if (tot_opp_cnt + num_returned > MAX_OPPS) {
			dev_err(handle->dev, "No. of OPPs exceeded MAX_OPPS");
			break;
		}

		opp = &perf_dom->opp[tot_opp_cnt];
		for (cnt = 0; cnt < num_returned; cnt++, opp++) {
			opp->perf = le32_to_cpu(level_info->opp[cnt].perf_val);
			opp->power = le32_to_cpu(level_info->opp[cnt].power);
			opp->trans_latency_us = le16_to_cpu
				(level_info->opp[cnt].transition_latency_us);

			dev_dbg(handle->dev, "Level %d Power %d Latency %dus\n",
				opp->perf, opp->power, opp->trans_latency_us);
		}

		tot_opp_cnt += num_returned;
		/*
		 * check for both returned and remaining to avoid infinite
		 * loop due to buggy firmware
		 */
	} while (num_returned && num_remaining);

	perf_dom->opp_count = tot_opp_cnt;
	scmi_xfer_put(handle, t);

	/* opp_cmp_func orders by ascending perf level */
	sort(perf_dom->opp, tot_opp_cnt, sizeof(*opp), opp_cmp_func, NULL);
	return ret;
}
296
297#define SCMI_PERF_FC_RING_DB(w)				\
298do {							\
299	u##w val = 0;					\
300							\
301	if (db->mask)					\
302		val = ioread##w(db->addr) & db->mask;	\
303	iowrite##w((u##w)db->set | val, db->addr);	\
304} while (0)
305
306static void scmi_perf_fc_ring_db(struct scmi_fc_db_info *db)
307{
308	if (!db || !db->addr)
309		return;
310
311	if (db->width == 1)
312		SCMI_PERF_FC_RING_DB(8);
313	else if (db->width == 2)
314		SCMI_PERF_FC_RING_DB(16);
315	else if (db->width == 4)
316		SCMI_PERF_FC_RING_DB(32);
317	else /* db->width == 8 */
318#ifdef CONFIG_64BIT
319		SCMI_PERF_FC_RING_DB(64);
320#else
321	{
322		u64 val = 0;
323
324		if (db->mask)
325			val = ioread64_hi_lo(db->addr) & db->mask;
326		iowrite64_hi_lo(db->set, db->addr);
327	}
328#endif
329}
330
331static int scmi_perf_mb_limits_set(const struct scmi_handle *handle, u32 domain,
332				   u32 max_perf, u32 min_perf)
333{
334	int ret;
335	struct scmi_xfer *t;
336	struct scmi_perf_set_limits *limits;
337
338	ret = scmi_xfer_get_init(handle, PERF_LIMITS_SET, SCMI_PROTOCOL_PERF,
339				 sizeof(*limits), 0, &t);
340	if (ret)
341		return ret;
342
343	limits = t->tx.buf;
344	limits->domain = cpu_to_le32(domain);
345	limits->max_level = cpu_to_le32(max_perf);
346	limits->min_level = cpu_to_le32(min_perf);
347
348	ret = scmi_do_xfer(handle, t);
349
350	scmi_xfer_put(handle, t);
351	return ret;
352}
353
354static int scmi_perf_limits_set(const struct scmi_handle *handle, u32 domain,
355				u32 max_perf, u32 min_perf)
356{
357	struct scmi_perf_info *pi = handle->perf_priv;
358	struct perf_dom_info *dom = pi->dom_info + domain;
359
360	if (dom->fc_info && dom->fc_info->limit_set_addr) {
361		iowrite32(max_perf, dom->fc_info->limit_set_addr);
362		iowrite32(min_perf, dom->fc_info->limit_set_addr + 4);
363		scmi_perf_fc_ring_db(dom->fc_info->limit_set_db);
364		return 0;
365	}
366
367	return scmi_perf_mb_limits_set(handle, domain, max_perf, min_perf);
368}
369
370static int scmi_perf_mb_limits_get(const struct scmi_handle *handle, u32 domain,
371				   u32 *max_perf, u32 *min_perf)
372{
373	int ret;
374	struct scmi_xfer *t;
375	struct scmi_perf_get_limits *limits;
376
377	ret = scmi_xfer_get_init(handle, PERF_LIMITS_GET, SCMI_PROTOCOL_PERF,
378				 sizeof(__le32), 0, &t);
379	if (ret)
380		return ret;
381
382	put_unaligned_le32(domain, t->tx.buf);
383
384	ret = scmi_do_xfer(handle, t);
385	if (!ret) {
386		limits = t->rx.buf;
387
388		*max_perf = le32_to_cpu(limits->max_level);
389		*min_perf = le32_to_cpu(limits->min_level);
390	}
391
392	scmi_xfer_put(handle, t);
393	return ret;
394}
395
396static int scmi_perf_limits_get(const struct scmi_handle *handle, u32 domain,
397				u32 *max_perf, u32 *min_perf)
398{
399	struct scmi_perf_info *pi = handle->perf_priv;
400	struct perf_dom_info *dom = pi->dom_info + domain;
401
402	if (dom->fc_info && dom->fc_info->limit_get_addr) {
403		*max_perf = ioread32(dom->fc_info->limit_get_addr);
404		*min_perf = ioread32(dom->fc_info->limit_get_addr + 4);
405		return 0;
406	}
407
408	return scmi_perf_mb_limits_get(handle, domain, max_perf, min_perf);
409}
410
411static int scmi_perf_mb_level_set(const struct scmi_handle *handle, u32 domain,
412				  u32 level, bool poll)
413{
414	int ret;
415	struct scmi_xfer *t;
416	struct scmi_perf_set_level *lvl;
417
418	ret = scmi_xfer_get_init(handle, PERF_LEVEL_SET, SCMI_PROTOCOL_PERF,
419				 sizeof(*lvl), 0, &t);
420	if (ret)
421		return ret;
422
423	t->hdr.poll_completion = poll;
424	lvl = t->tx.buf;
425	lvl->domain = cpu_to_le32(domain);
426	lvl->level = cpu_to_le32(level);
427
428	ret = scmi_do_xfer(handle, t);
429
430	scmi_xfer_put(handle, t);
431	return ret;
432}
433
434static int scmi_perf_level_set(const struct scmi_handle *handle, u32 domain,
435			       u32 level, bool poll)
436{
437	struct scmi_perf_info *pi = handle->perf_priv;
438	struct perf_dom_info *dom = pi->dom_info + domain;
439
440	if (dom->fc_info && dom->fc_info->level_set_addr) {
441		iowrite32(level, dom->fc_info->level_set_addr);
442		scmi_perf_fc_ring_db(dom->fc_info->level_set_db);
443		return 0;
444	}
445
446	return scmi_perf_mb_level_set(handle, domain, level, poll);
447}
448
449static int scmi_perf_mb_level_get(const struct scmi_handle *handle, u32 domain,
450				  u32 *level, bool poll)
451{
452	int ret;
453	struct scmi_xfer *t;
454
455	ret = scmi_xfer_get_init(handle, PERF_LEVEL_GET, SCMI_PROTOCOL_PERF,
456				 sizeof(u32), sizeof(u32), &t);
457	if (ret)
458		return ret;
459
460	t->hdr.poll_completion = poll;
461	put_unaligned_le32(domain, t->tx.buf);
462
463	ret = scmi_do_xfer(handle, t);
464	if (!ret)
465		*level = get_unaligned_le32(t->rx.buf);
466
467	scmi_xfer_put(handle, t);
468	return ret;
469}
470
471static int scmi_perf_level_get(const struct scmi_handle *handle, u32 domain,
472			       u32 *level, bool poll)
473{
474	struct scmi_perf_info *pi = handle->perf_priv;
475	struct perf_dom_info *dom = pi->dom_info + domain;
476
477	if (dom->fc_info && dom->fc_info->level_get_addr) {
478		*level = ioread32(dom->fc_info->level_get_addr);
479		return 0;
480	}
481
482	return scmi_perf_mb_level_get(handle, domain, level, poll);
483}
484
485static bool scmi_perf_fc_size_is_valid(u32 msg, u32 size)
486{
487	if ((msg == PERF_LEVEL_GET || msg == PERF_LEVEL_SET) && size == 4)
488		return true;
489	if ((msg == PERF_LIMITS_GET || msg == PERF_LIMITS_SET) && size == 8)
490		return true;
491	return false;
492}
493
/*
 * Query PERF_DESCRIBE_FASTCHANNEL for (@domain, @message_id) and, on
 * success, map the fast channel into *@p_addr and - when @p_db is
 * non-NULL and the platform supports it - allocate and fill a doorbell
 * descriptor into *@p_db.
 *
 * Failures are silent by design: the outputs are simply left untouched
 * and callers fall back to the message-based slow path.
 */
static void
scmi_perf_domain_desc_fc(const struct scmi_handle *handle, u32 domain,
			 u32 message_id, void __iomem **p_addr,
			 struct scmi_fc_db_info **p_db)
{
	int ret;
	u32 flags;
	u64 phys_addr;
	u8 size;
	void __iomem *addr;
	struct scmi_xfer *t;
	struct scmi_fc_db_info *db;
	struct scmi_perf_get_fc_info *info;
	struct scmi_msg_resp_perf_desc_fc *resp;

	if (!p_addr)
		return;

	ret = scmi_xfer_get_init(handle, PERF_DESCRIBE_FASTCHANNEL,
				 SCMI_PROTOCOL_PERF,
				 sizeof(*info), sizeof(*resp), &t);
	if (ret)
		return;

	info = t->tx.buf;
	info->domain = cpu_to_le32(domain);
	info->message_id = cpu_to_le32(message_id);

	ret = scmi_do_xfer(handle, t);
	if (ret)
		goto err_xfer;

	resp = t->rx.buf;
	flags = le32_to_cpu(resp->attr);
	/* Valid sizes are 4 or 8, so the u8 narrowing is harmless here */
	size = le32_to_cpu(resp->chan_size);
	if (!scmi_perf_fc_size_is_valid(message_id, size))
		goto err_xfer;

	phys_addr = le32_to_cpu(resp->chan_addr_low);
	phys_addr |= (u64)le32_to_cpu(resp->chan_addr_high) << 32;
	addr = devm_ioremap(handle->dev, phys_addr, size);
	if (!addr)
		goto err_xfer;
	*p_addr = addr;

	if (p_db && SUPPORTS_DOORBELL(flags)) {
		db = devm_kzalloc(handle->dev, sizeof(*db), GFP_KERNEL);
		if (!db)
			goto err_xfer;

		/* Doorbell width is encoded as log2(bytes) */
		size = 1 << DOORBELL_REG_WIDTH(flags);
		phys_addr = le32_to_cpu(resp->db_addr_low);
		phys_addr |= (u64)le32_to_cpu(resp->db_addr_high) << 32;
		addr = devm_ioremap(handle->dev, phys_addr, size);
		if (!addr)
			goto err_xfer;

		db->addr = addr;
		db->width = size;
		db->set = le32_to_cpu(resp->db_set_lmask);
		db->set |= (u64)le32_to_cpu(resp->db_set_hmask) << 32;
		db->mask = le32_to_cpu(resp->db_preserve_lmask);
		db->mask |= (u64)le32_to_cpu(resp->db_preserve_hmask) << 32;
		*p_db = db;
	}
err_xfer:
	scmi_xfer_put(handle, t);
}
562
/*
 * Discover and map all fast channels of @domain. Each individual
 * channel is optional; scmi_perf_domain_desc_fc() leaves the matching
 * pointer NULL when a channel is absent or cannot be mapped.
 */
static void scmi_perf_domain_init_fc(const struct scmi_handle *handle,
				     u32 domain, struct scmi_fc_info **p_fc)
{
	struct scmi_fc_info *fc;

	fc = devm_kzalloc(handle->dev, sizeof(*fc), GFP_KERNEL);
	if (!fc)
		return;

	scmi_perf_domain_desc_fc(handle, domain, PERF_LEVEL_SET,
				 &fc->level_set_addr, &fc->level_set_db);
	scmi_perf_domain_desc_fc(handle, domain, PERF_LEVEL_GET,
				 &fc->level_get_addr, NULL);
	scmi_perf_domain_desc_fc(handle, domain, PERF_LIMITS_SET,
				 &fc->limit_set_addr, &fc->limit_set_db);
	scmi_perf_domain_desc_fc(handle, domain, PERF_LIMITS_GET,
				 &fc->limit_get_addr, NULL);
	*p_fc = fc;
}
582
583/* Device specific ops */
584static int scmi_dev_domain_id(struct device *dev)
585{
586	struct of_phandle_args clkspec;
587
588	if (of_parse_phandle_with_args(dev->of_node, "clocks", "#clock-cells",
589				       0, &clkspec))
590		return -EINVAL;
591
592	return clkspec.args[0];
593}
594
/*
 * Register every OPP of @dev's performance domain with the OPP library,
 * converting abstract perf levels to Hz via the domain's mult_factor.
 * On failure, all OPPs added so far are rolled back.
 *
 * Return: 0 on success, negative error code otherwise.
 */
static int scmi_dvfs_device_opps_add(const struct scmi_handle *handle,
				     struct device *dev)
{
	int idx, ret, domain;
	unsigned long freq;
	struct scmi_opp *opp;
	struct perf_dom_info *dom;
	struct scmi_perf_info *pi = handle->perf_priv;

	domain = scmi_dev_domain_id(dev);
	if (domain < 0)
		return domain;

	dom = pi->dom_info + domain;

	for (opp = dom->opp, idx = 0; idx < dom->opp_count; idx++, opp++) {
		freq = opp->perf * dom->mult_factor;

		ret = dev_pm_opp_add(dev, freq, 0);
		if (ret) {
			dev_warn(dev, "failed to add opp %luHz\n", freq);

			/* Undo the registrations performed so far */
			while (idx-- > 0) {
				freq = (--opp)->perf * dom->mult_factor;
				dev_pm_opp_remove(dev, freq);
			}
			return ret;
		}
	}
	return 0;
}
626
/*
 * Return the transition latency of the highest OPP of @dev's domain,
 * converted from microseconds to nanoseconds, or a negative error code
 * when the device has no valid domain.
 *
 * NOTE(review): assumes opp_count >= 1 (i.e. describe_levels succeeded);
 * with an empty table this reads opp[-1] — confirm callers guarantee it.
 */
static int scmi_dvfs_transition_latency_get(const struct scmi_handle *handle,
					    struct device *dev)
{
	struct perf_dom_info *dom;
	struct scmi_perf_info *pi = handle->perf_priv;
	int domain = scmi_dev_domain_id(dev);

	if (domain < 0)
		return domain;

	dom = pi->dom_info + domain;
	/* uS to nS */
	return dom->opp[dom->opp_count - 1].trans_latency_us * 1000;
}
641
642static int scmi_dvfs_freq_set(const struct scmi_handle *handle, u32 domain,
643			      unsigned long freq, bool poll)
644{
645	struct scmi_perf_info *pi = handle->perf_priv;
646	struct perf_dom_info *dom = pi->dom_info + domain;
647
648	return scmi_perf_level_set(handle, domain, freq / dom->mult_factor,
649				   poll);
650}
651
652static int scmi_dvfs_freq_get(const struct scmi_handle *handle, u32 domain,
653			      unsigned long *freq, bool poll)
654{
655	int ret;
656	u32 level;
657	struct scmi_perf_info *pi = handle->perf_priv;
658	struct perf_dom_info *dom = pi->dom_info + domain;
659
660	ret = scmi_perf_level_get(handle, domain, &level, poll);
661	if (!ret)
662		*freq = level * dom->mult_factor;
663
664	return ret;
665}
666
667static int scmi_dvfs_est_power_get(const struct scmi_handle *handle, u32 domain,
668				   unsigned long *freq, unsigned long *power)
669{
670	struct scmi_perf_info *pi = handle->perf_priv;
671	struct perf_dom_info *dom;
672	unsigned long opp_freq;
673	int idx, ret = -EINVAL;
674	struct scmi_opp *opp;
675
676	dom = pi->dom_info + domain;
677	if (!dom)
678		return -EIO;
679
680	for (opp = dom->opp, idx = 0; idx < dom->opp_count; idx++, opp++) {
681		opp_freq = opp->perf * dom->mult_factor;
682		if (opp_freq < *freq)
683			continue;
684
685		*freq = opp_freq;
686		*power = opp->power;
687		ret = 0;
688		break;
689	}
690
691	return ret;
692}
693
/* Operation table exported to SCMI users via handle->perf_ops. */
static struct scmi_perf_ops perf_ops = {
	.limits_set = scmi_perf_limits_set,
	.limits_get = scmi_perf_limits_get,
	.level_set = scmi_perf_level_set,
	.level_get = scmi_perf_level_get,
	.device_domain_id = scmi_dev_domain_id,
	.transition_latency_get = scmi_dvfs_transition_latency_get,
	.device_opps_add = scmi_dvfs_device_opps_add,
	.freq_set = scmi_dvfs_freq_set,
	.freq_get = scmi_dvfs_freq_get,
	.est_power_get = scmi_dvfs_est_power_get,
};
706
707static int scmi_perf_protocol_init(struct scmi_handle *handle)
708{
709	int domain;
710	u32 version;
711	struct scmi_perf_info *pinfo;
712
713	scmi_version_get(handle, SCMI_PROTOCOL_PERF, &version);
714
715	dev_dbg(handle->dev, "Performance Version %d.%d\n",
716		PROTOCOL_REV_MAJOR(version), PROTOCOL_REV_MINOR(version));
717
718	pinfo = devm_kzalloc(handle->dev, sizeof(*pinfo), GFP_KERNEL);
719	if (!pinfo)
720		return -ENOMEM;
721
722	scmi_perf_attributes_get(handle, pinfo);
723
724	pinfo->dom_info = devm_kcalloc(handle->dev, pinfo->num_domains,
725				       sizeof(*pinfo->dom_info), GFP_KERNEL);
726	if (!pinfo->dom_info)
727		return -ENOMEM;
728
729	for (domain = 0; domain < pinfo->num_domains; domain++) {
730		struct perf_dom_info *dom = pinfo->dom_info + domain;
731
732		scmi_perf_domain_attributes_get(handle, domain, dom);
733		scmi_perf_describe_levels_get(handle, domain, dom);
734
735		if (dom->perf_fastchannels)
736			scmi_perf_domain_init_fc(handle, domain, &dom->fc_info);
737	}
738
739	handle->perf_ops = &perf_ops;
740	handle->perf_priv = pinfo;
741
742	return 0;
743}
744
/* Register the Performance protocol with the SCMI core at boot. */
static int __init scmi_perf_init(void)
{
	return scmi_protocol_register(SCMI_PROTOCOL_PERF,
				      &scmi_perf_protocol_init);
}
subsys_initcall(scmi_perf_init);