drivers/firmware/arm_scmi/driver.c (v5.9)
  1// SPDX-License-Identifier: GPL-2.0
  2/*
  3 * System Control and Management Interface (SCMI) Message Protocol driver
  4 *
  5 * SCMI Message Protocol is used between the System Control Processor(SCP)
  6 * and the Application Processors(AP). The Message Handling Unit(MHU)
  7 * provides a mechanism for inter-processor communication between SCP's
  8 * Cortex M3 and AP.
  9 *
 10 * SCP offers control and management of the core/cluster power states,
 11 * various power domain DVFS including the core/cluster, certain system
 12 * clocks configuration, thermal sensors and many others.
 13 *
 14 * Copyright (C) 2018 ARM Ltd.
 15 */
 16
 17#include <linux/bitmap.h>
 18#include <linux/export.h>
 19#include <linux/io.h>
 20#include <linux/kernel.h>
 21#include <linux/ktime.h>
 22#include <linux/module.h>
 23#include <linux/of_address.h>
 24#include <linux/of_device.h>
 25#include <linux/processor.h>
 26#include <linux/slab.h>
 27
 28#include "common.h"
 29#include "notify.h"
 30
 31#define CREATE_TRACE_POINTS
 32#include <trace/events/scmi.h>
 33
 34enum scmi_error_codes {
 35	SCMI_SUCCESS = 0,	/* Success */
 36	SCMI_ERR_SUPPORT = -1,	/* Not supported */
 37	SCMI_ERR_PARAMS = -2,	/* Invalid Parameters */
 38	SCMI_ERR_ACCESS = -3,	/* Invalid access/permission denied */
 39	SCMI_ERR_ENTRY = -4,	/* Not found */
 40	SCMI_ERR_RANGE = -5,	/* Value out of range */
 41	SCMI_ERR_BUSY = -6,	/* Device busy */
 42	SCMI_ERR_COMMS = -7,	/* Communication Error */
 43	SCMI_ERR_GENERIC = -8,	/* Generic Error */
 44	SCMI_ERR_HARDWARE = -9,	/* Hardware Error */
 45	SCMI_ERR_PROTOCOL = -10,/* Protocol Error */
 46	SCMI_ERR_MAX
 47};
 48
 49/* List of all SCMI devices active in system */
 50static LIST_HEAD(scmi_list);
 51/* Protection for the entire list */
 52static DEFINE_MUTEX(scmi_list_mutex);
 53/* Track the unique id for the transfers for debug & profiling purpose */
 54static atomic_t transfer_last_id;
 55
 56/**
 57 * struct scmi_xfers_info - Structure to manage transfer information
 58 *
 59 * @xfer_block: Preallocated Message array
 60 * @xfer_alloc_table: Bitmap table for allocated messages.
 61 *	Index of this bitmap table is also used for message
 62 *	sequence identifier.
 63 * @xfer_lock: Protection for message allocation
 64 */
 65struct scmi_xfers_info {
 66	struct scmi_xfer *xfer_block;
 67	unsigned long *xfer_alloc_table;
 68	spinlock_t xfer_lock;
 69};
 70
 71/**
 72 * struct scmi_info - Structure representing a SCMI instance
 73 *
 74 * @dev: Device pointer
 75 * @desc: SoC description for this instance
 76 * @version: SCMI revision information containing protocol version,
 77 *	implementation version and (sub-)vendor identification.
 78 * @handle: Instance of SCMI handle to send to clients
 79 * @tx_minfo: Universal Transmit Message management info
 80 * @rx_minfo: Universal Receive Message management info
 81 * @tx_idr: IDR object to map protocol id to Tx channel info pointer
 82 * @rx_idr: IDR object to map protocol id to Rx channel info pointer
 83 * @protocols_imp: List of protocols implemented, currently maximum of
 84 *	MAX_PROTOCOLS_IMP elements allocated by the base protocol
 85 * @node: List head
 86 * @users: Number of users of this instance
 87 */
 88struct scmi_info {
 89	struct device *dev;
 90	const struct scmi_desc *desc;
 91	struct scmi_revision_info version;
 92	struct scmi_handle handle;
 93	struct scmi_xfers_info tx_minfo;
 94	struct scmi_xfers_info rx_minfo;
 95	struct idr tx_idr;
 96	struct idr rx_idr;
 97	u8 *protocols_imp;
 98	struct list_head node;
 99	int users;
100};
101
102#define handle_to_scmi_info(h)	container_of(h, struct scmi_info, handle)
103
104static const int scmi_linux_errmap[] = {
105	/* better than switch case as long as return value is continuous */
106	0,			/* SCMI_SUCCESS */
107	-EOPNOTSUPP,		/* SCMI_ERR_SUPPORT */
108	-EINVAL,		/* SCMI_ERR_PARAM */
109	-EACCES,		/* SCMI_ERR_ACCESS */
110	-ENOENT,		/* SCMI_ERR_ENTRY */
111	-ERANGE,		/* SCMI_ERR_RANGE */
112	-EBUSY,			/* SCMI_ERR_BUSY */
113	-ECOMM,			/* SCMI_ERR_COMMS */
114	-EIO,			/* SCMI_ERR_GENERIC */
115	-EREMOTEIO,		/* SCMI_ERR_HARDWARE */
116	-EPROTO,		/* SCMI_ERR_PROTOCOL */
117};
118
119static inline int scmi_to_linux_errno(int errno)
120{
121	if (errno < SCMI_SUCCESS && errno > SCMI_ERR_MAX)
122		return scmi_linux_errmap[-errno];
123	return -EIO;
124}
125
126/**
127 * scmi_dump_header_dbg() - Helper to dump a message header.
128 *
129 * @dev: Device pointer corresponding to the SCMI entity
130 * @hdr: pointer to header.
131 */
132static inline void scmi_dump_header_dbg(struct device *dev,
133					struct scmi_msg_hdr *hdr)
134{
135	dev_dbg(dev, "Message ID: %x Sequence ID: %x Protocol: %x\n",
136		hdr->id, hdr->seq, hdr->protocol_id);
137}
138
139/**
140 * scmi_xfer_get() - Allocate one message
141 *
142 * @handle: Pointer to SCMI entity handle
143 * @minfo: Pointer to Tx/Rx Message management info based on channel type
144 *
145 * Helper function which is used by various message functions that are
146 * exposed to clients of this driver for allocating a message traffic event.
147 *
148 * This function can sleep depending on pending requests already in the system
149 * for the SCMI entity. Further, this also holds a spinlock to maintain
150 * integrity of internal data structures.
151 *
152 * Return: 0 if all went fine, else corresponding error.
153 */
154static struct scmi_xfer *scmi_xfer_get(const struct scmi_handle *handle,
155				       struct scmi_xfers_info *minfo)
156{
157	u16 xfer_id;
158	struct scmi_xfer *xfer;
159	unsigned long flags, bit_pos;
160	struct scmi_info *info = handle_to_scmi_info(handle);
161
162	/* Keep the locked section as small as possible */
163	spin_lock_irqsave(&minfo->xfer_lock, flags);
164	bit_pos = find_first_zero_bit(minfo->xfer_alloc_table,
165				      info->desc->max_msg);
166	if (bit_pos == info->desc->max_msg) {
167		spin_unlock_irqrestore(&minfo->xfer_lock, flags);
168		return ERR_PTR(-ENOMEM);
169	}
170	set_bit(bit_pos, minfo->xfer_alloc_table);
171	spin_unlock_irqrestore(&minfo->xfer_lock, flags);
172
173	xfer_id = bit_pos;
174
175	xfer = &minfo->xfer_block[xfer_id];
176	xfer->hdr.seq = xfer_id;
177	reinit_completion(&xfer->done);
178	xfer->transfer_id = atomic_inc_return(&transfer_last_id);
179
180	return xfer;
181}
182
183/**
184 * __scmi_xfer_put() - Release a message
185 *
186 * @minfo: Pointer to Tx/Rx Message management info based on channel type
187 * @xfer: message that was reserved by scmi_xfer_get
188 *
189 * This holds a spinlock to maintain integrity of internal data structures.
190 */
191static void
192__scmi_xfer_put(struct scmi_xfers_info *minfo, struct scmi_xfer *xfer)
193{
194	unsigned long flags;
195
196	/*
197	 * Keep the locked section as small as possible
198	 * NOTE: we might escape with smp_mb and no lock here..
199	 * but just be conservative and symmetric.
200	 */
201	spin_lock_irqsave(&minfo->xfer_lock, flags);
202	clear_bit(xfer->hdr.seq, minfo->xfer_alloc_table);
203	spin_unlock_irqrestore(&minfo->xfer_lock, flags);
204}
205
206static void scmi_handle_notification(struct scmi_chan_info *cinfo, u32 msg_hdr)
207{
208	struct scmi_xfer *xfer;
209	struct device *dev = cinfo->dev;
210	struct scmi_info *info = handle_to_scmi_info(cinfo->handle);
211	struct scmi_xfers_info *minfo = &info->rx_minfo;
212	ktime_t ts;
213
214	ts = ktime_get_boottime();
215	xfer = scmi_xfer_get(cinfo->handle, minfo);
216	if (IS_ERR(xfer)) {
217		dev_err(dev, "failed to get free message slot (%ld)\n",
218			PTR_ERR(xfer));
219		info->desc->ops->clear_channel(cinfo);
220		return;
221	}
222
223	unpack_scmi_header(msg_hdr, &xfer->hdr);
224	scmi_dump_header_dbg(dev, &xfer->hdr);
225	info->desc->ops->fetch_notification(cinfo, info->desc->max_msg_size,
226					    xfer);
227	scmi_notify(cinfo->handle, xfer->hdr.protocol_id,
228		    xfer->hdr.id, xfer->rx.buf, xfer->rx.len, ts);
229
230	trace_scmi_rx_done(xfer->transfer_id, xfer->hdr.id,
231			   xfer->hdr.protocol_id, xfer->hdr.seq,
232			   MSG_TYPE_NOTIFICATION);
233
234	__scmi_xfer_put(minfo, xfer);
235
236	info->desc->ops->clear_channel(cinfo);
237}
238
239static void scmi_handle_response(struct scmi_chan_info *cinfo,
240				 u16 xfer_id, u8 msg_type)
241{
242	struct scmi_xfer *xfer;
243	struct device *dev = cinfo->dev;
244	struct scmi_info *info = handle_to_scmi_info(cinfo->handle);
245	struct scmi_xfers_info *minfo = &info->tx_minfo;
246
247	/* Are we even expecting this? */
248	if (!test_bit(xfer_id, minfo->xfer_alloc_table)) {
249		dev_err(dev, "message for %d is not expected!\n", xfer_id);
250		info->desc->ops->clear_channel(cinfo);
251		return;
252	}
253
254	xfer = &minfo->xfer_block[xfer_id];
255	/*
256	 * Even if a response was indeed expected on this slot at this point,
257	 * a buggy platform could wrongly reply feeding us an unexpected
258	 * delayed response we're not prepared to handle: bail-out safely
259	 * blaming firmware.
260	 */
261	if (unlikely(msg_type == MSG_TYPE_DELAYED_RESP && !xfer->async_done)) {
262		dev_err(dev,
263			"Delayed Response for %d not expected! Buggy F/W ?\n",
264			xfer_id);
265		info->desc->ops->clear_channel(cinfo);
266		/* It was unexpected, so nobody will clear the xfer if not us */
267		__scmi_xfer_put(minfo, xfer);
268		return;
269	}
270
271	scmi_dump_header_dbg(dev, &xfer->hdr);
272
273	info->desc->ops->fetch_response(cinfo, xfer);
274
275	trace_scmi_rx_done(xfer->transfer_id, xfer->hdr.id,
276			   xfer->hdr.protocol_id, xfer->hdr.seq,
277			   msg_type);
278
279	if (msg_type == MSG_TYPE_DELAYED_RESP) {
280		info->desc->ops->clear_channel(cinfo);
281		complete(xfer->async_done);
282	} else {
283		complete(&xfer->done);
284	}
285}
286
287/**
288 * scmi_rx_callback() - callback for receiving messages
289 *
290 * @cinfo: SCMI channel info
291 * @msg_hdr: Message header
292 *
293 * Processes one received message to appropriate transfer information and
294 * signals completion of the transfer.
295 *
296 * NOTE: This function will be invoked in IRQ context, hence should be
297 * as optimal as possible.
298 */
299void scmi_rx_callback(struct scmi_chan_info *cinfo, u32 msg_hdr)
300{
301	u16 xfer_id = MSG_XTRACT_TOKEN(msg_hdr);
302	u8 msg_type = MSG_XTRACT_TYPE(msg_hdr);
303
304	switch (msg_type) {
305	case MSG_TYPE_NOTIFICATION:
306		scmi_handle_notification(cinfo, msg_hdr);
307		break;
308	case MSG_TYPE_COMMAND:
309	case MSG_TYPE_DELAYED_RESP:
310		scmi_handle_response(cinfo, xfer_id, msg_type);
311		break;
312	default:
313		WARN_ONCE(1, "received unknown msg_type:%d\n", msg_type);
314		break;
315	}
316}
317
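scmi_rx_callback() only needs the message-type and token fields of the 32-bit SCMI header to route an incoming message. The layout below follows the SCMI specification; the real MSG_XTRACT_*() helpers live in common.h, and the two functions here are only a rough, hedged equivalent written with the linux/bitfield.h accessors:

/*
 * 32-bit SCMI message header layout (per the SCMI specification):
 *   [31:28] reserved
 *   [27:18] token / sequence number (mirrored back in responses)
 *   [17:10] protocol id
 *   [9:8]   message type: 0 command, 2 delayed response, 3 notification
 *   [7:0]   message id
 */
static inline u16 example_xtract_token(u32 msg_hdr)
{
	return FIELD_GET(GENMASK(27, 18), msg_hdr);	/* ~ MSG_XTRACT_TOKEN() */
}

static inline u8 example_xtract_type(u32 msg_hdr)
{
	return FIELD_GET(GENMASK(9, 8), msg_hdr);	/* ~ MSG_XTRACT_TYPE() */
}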
318/**
319 * scmi_xfer_put() - Release a transmit message
320 *
321 * @handle: Pointer to SCMI entity handle
322 * @xfer: message that was reserved by scmi_xfer_get
323 */
324void scmi_xfer_put(const struct scmi_handle *handle, struct scmi_xfer *xfer)
325{
326	struct scmi_info *info = handle_to_scmi_info(handle);
327
328	__scmi_xfer_put(&info->tx_minfo, xfer);
329}
330
331#define SCMI_MAX_POLL_TO_NS	(100 * NSEC_PER_USEC)
332
333static bool scmi_xfer_done_no_timeout(struct scmi_chan_info *cinfo,
334				      struct scmi_xfer *xfer, ktime_t stop)
335{
336	struct scmi_info *info = handle_to_scmi_info(cinfo->handle);
337
338	return info->desc->ops->poll_done(cinfo, xfer) ||
339	       ktime_after(ktime_get(), stop);
340}
341
342/**
343 * scmi_do_xfer() - Do one transfer
344 *
345 * @handle: Pointer to SCMI entity handle
346 * @xfer: Transfer to initiate and wait for response
347 *
348 * Return: -ETIMEDOUT in case of no response, if transmit error,
349 *	return corresponding error, else if all goes well,
350 *	return 0.
351 */
352int scmi_do_xfer(const struct scmi_handle *handle, struct scmi_xfer *xfer)
353{
354	int ret;
355	int timeout;
356	struct scmi_info *info = handle_to_scmi_info(handle);
357	struct device *dev = info->dev;
358	struct scmi_chan_info *cinfo;
359
360	cinfo = idr_find(&info->tx_idr, xfer->hdr.protocol_id);
361	if (unlikely(!cinfo))
362		return -EINVAL;
363
364	trace_scmi_xfer_begin(xfer->transfer_id, xfer->hdr.id,
365			      xfer->hdr.protocol_id, xfer->hdr.seq,
366			      xfer->hdr.poll_completion);
367
368	ret = info->desc->ops->send_message(cinfo, xfer);
369	if (ret < 0) {
370		dev_dbg(dev, "Failed to send message %d\n", ret);
371		return ret;
372	}
373
374	if (xfer->hdr.poll_completion) {
375		ktime_t stop = ktime_add_ns(ktime_get(), SCMI_MAX_POLL_TO_NS);
376
377		spin_until_cond(scmi_xfer_done_no_timeout(cinfo, xfer, stop));
378
379		if (ktime_before(ktime_get(), stop))
380			info->desc->ops->fetch_response(cinfo, xfer);
381		else
382			ret = -ETIMEDOUT;
383	} else {
384		/* And we wait for the response. */
385		timeout = msecs_to_jiffies(info->desc->max_rx_timeout_ms);
386		if (!wait_for_completion_timeout(&xfer->done, timeout)) {
387			dev_err(dev, "timed out in resp(caller: %pS)\n",
388				(void *)_RET_IP_);
389			ret = -ETIMEDOUT;
390		}
391	}
392
393	if (!ret && xfer->hdr.status)
394		ret = scmi_to_linux_errno(xfer->hdr.status);
395
396	if (info->desc->ops->mark_txdone)
397		info->desc->ops->mark_txdone(cinfo, ret);
398
399	trace_scmi_xfer_end(xfer->transfer_id, xfer->hdr.id,
400			    xfer->hdr.protocol_id, xfer->hdr.seq, ret);
401
402	return ret;
403}
404
405#define SCMI_MAX_RESPONSE_TIMEOUT	(2 * MSEC_PER_SEC)
406
407/**
408 * scmi_do_xfer_with_response() - Do one transfer and wait until the delayed
409 *	response is received
410 *
411 * @handle: Pointer to SCMI entity handle
412 * @xfer: Transfer to initiate and wait for response
413 *
414 * Return: -ETIMEDOUT in case of no delayed response, if transmit error,
415 *	return corresponding error, else if all goes well, return 0.
416 */
417int scmi_do_xfer_with_response(const struct scmi_handle *handle,
418			       struct scmi_xfer *xfer)
419{
420	int ret, timeout = msecs_to_jiffies(SCMI_MAX_RESPONSE_TIMEOUT);
421	DECLARE_COMPLETION_ONSTACK(async_response);
422
423	xfer->async_done = &async_response;
424
425	ret = scmi_do_xfer(handle, xfer);
426	if (!ret && !wait_for_completion_timeout(xfer->async_done, timeout))
427		ret = -ETIMEDOUT;
428
429	xfer->async_done = NULL;
430	return ret;
431}
432
433/**
434 * scmi_xfer_get_init() - Allocate and initialise one message for transmit
435 *
436 * @handle: Pointer to SCMI entity handle
437 * @msg_id: Message identifier
438 * @prot_id: Protocol identifier for the message
439 * @tx_size: transmit message size
440 * @rx_size: receive message size
441 * @p: pointer to the allocated and initialised message
442 *
443 * This function allocates the message using @scmi_xfer_get and
444 * initialise the header.
445 *
446 * Return: 0 if all went fine with @p pointing to message, else
447 *	corresponding error.
448 */
449int scmi_xfer_get_init(const struct scmi_handle *handle, u8 msg_id, u8 prot_id,
450		       size_t tx_size, size_t rx_size, struct scmi_xfer **p)
451{
452	int ret;
453	struct scmi_xfer *xfer;
454	struct scmi_info *info = handle_to_scmi_info(handle);
455	struct scmi_xfers_info *minfo = &info->tx_minfo;
456	struct device *dev = info->dev;
457
458	/* Ensure we have sane transfer sizes */
459	if (rx_size > info->desc->max_msg_size ||
460	    tx_size > info->desc->max_msg_size)
461		return -ERANGE;
462
463	xfer = scmi_xfer_get(handle, minfo);
464	if (IS_ERR(xfer)) {
465		ret = PTR_ERR(xfer);
466		dev_err(dev, "failed to get free message slot(%d)\n", ret);
467		return ret;
468	}
469
470	xfer->tx.len = tx_size;
471	xfer->rx.len = rx_size ? : info->desc->max_msg_size;
472	xfer->hdr.id = msg_id;
473	xfer->hdr.protocol_id = prot_id;
474	xfer->hdr.poll_completion = false;
475
476	*p = xfer;
477
478	return 0;
479}
480
481/**
482 * scmi_version_get() - command to get the revision of the SCMI entity
483 *
484 * @handle: Pointer to SCMI entity handle
485 * @protocol: Protocol identifier for the message
486 * @version: Holds returned version of protocol.
487 *
488 * Updates the SCMI information in the internal data structure.
489 *
490 * Return: 0 if all went fine, else return appropriate error.
491 */
492int scmi_version_get(const struct scmi_handle *handle, u8 protocol,
493		     u32 *version)
494{
495	int ret;
496	__le32 *rev_info;
497	struct scmi_xfer *t;
498
499	ret = scmi_xfer_get_init(handle, PROTOCOL_VERSION, protocol, 0,
500				 sizeof(*version), &t);
501	if (ret)
502		return ret;
503
504	ret = scmi_do_xfer(handle, t);
505	if (!ret) {
506		rev_info = t->rx.buf;
507		*version = le32_to_cpu(*rev_info);
508	}
509
510	scmi_xfer_put(handle, t);
511	return ret;
512}
513
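scmi_version_get() above is the canonical zero-payload command; most protocol commands also carry a Tx payload. Below is a hedged sketch of the same get_init/do_xfer/put lifecycle for a hypothetical command that sends a 32-bit index and reads back a 32-bit attribute; the message id EXAMPLE_GET_ATTR and the payload layout are illustrative, not part of this driver:

/* Hypothetical message id, for illustration only */
#define EXAMPLE_GET_ATTR	0x7

static int example_attr_get(const struct scmi_handle *handle, u8 protocol,
			    u32 index, u32 *attr)
{
	int ret;
	__le32 *msg;
	struct scmi_xfer *t;

	ret = scmi_xfer_get_init(handle, EXAMPLE_GET_ATTR, protocol,
				 sizeof(*msg), sizeof(__le32), &t);
	if (ret)
		return ret;

	/* Tx payload is little-endian as mandated by the SCMI spec */
	msg = t->tx.buf;
	*msg = cpu_to_le32(index);

	ret = scmi_do_xfer(handle, t);
	if (!ret)
		*attr = le32_to_cpu(*(__le32 *)t->rx.buf);

	scmi_xfer_put(handle, t);
	return ret;
}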
514void scmi_setup_protocol_implemented(const struct scmi_handle *handle,
515				     u8 *prot_imp)
516{
517	struct scmi_info *info = handle_to_scmi_info(handle);
518
519	info->protocols_imp = prot_imp;
520}
521
522static bool
523scmi_is_protocol_implemented(const struct scmi_handle *handle, u8 prot_id)
524{
525	int i;
526	struct scmi_info *info = handle_to_scmi_info(handle);
527
528	if (!info->protocols_imp)
529		return false;
530
531	for (i = 0; i < MAX_PROTOCOLS_IMP; i++)
532		if (info->protocols_imp[i] == prot_id)
533			return true;
534	return false;
535}
536
537/**
538 * scmi_handle_get() - Get the SCMI handle for a device
539 *
540 * @dev: pointer to device for which we want SCMI handle
541 *
542 * NOTE: The function does not track individual clients of the framework
543 * and is expected to be maintained by caller of SCMI protocol library.
544 * scmi_handle_put must be balanced with successful scmi_handle_get
545 *
546 * Return: pointer to handle if successful, NULL on error
547 */
548struct scmi_handle *scmi_handle_get(struct device *dev)
549{
550	struct list_head *p;
551	struct scmi_info *info;
552	struct scmi_handle *handle = NULL;
553
554	mutex_lock(&scmi_list_mutex);
555	list_for_each(p, &scmi_list) {
556		info = list_entry(p, struct scmi_info, node);
557		if (dev->parent == info->dev) {
558			handle = &info->handle;
559			info->users++;
560			break;
561		}
562	}
563	mutex_unlock(&scmi_list_mutex);
564
565	return handle;
566}
567
568/**
569 * scmi_handle_put() - Release the handle acquired by scmi_handle_get
570 *
571 * @handle: handle acquired by scmi_handle_get
572 *
573 * NOTE: The function does not track individual clients of the framework
574 * and is expected to be maintained by caller of SCMI protocol library.
575 * scmi_handle_put must be balanced with successful scmi_handle_get
576 *
577 * Return: 0 is successfully released
578 *	if null was passed, it returns -EINVAL;
579 */
580int scmi_handle_put(const struct scmi_handle *handle)
581{
582	struct scmi_info *info;
583
584	if (!handle)
585		return -EINVAL;
586
587	info = handle_to_scmi_info(handle);
588	mutex_lock(&scmi_list_mutex);
589	if (!WARN_ON(!info->users))
590		info->users--;
591	mutex_unlock(&scmi_list_mutex);
592
593	return 0;
594}
595
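Every successful scmi_handle_get() must eventually be balanced by scmi_handle_put(), typically across a consumer's probe/remove path; the driver core itself does not track individual users. A minimal sketch, assuming a hypothetical consumer bound to a child device of the SCMI platform instance:

static int example_consumer_probe(struct device *dev)
{
	struct scmi_handle *handle;

	/* dev must be a child of the SCMI platform device for this to match */
	handle = scmi_handle_get(dev);
	if (!handle)
		return -EPROBE_DEFER;

	dev_set_drvdata(dev, handle);
	return 0;
}

static void example_consumer_remove(struct device *dev)
{
	/* Balance the reference taken in probe */
	scmi_handle_put(dev_get_drvdata(dev));
}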
596static int __scmi_xfer_info_init(struct scmi_info *sinfo,
597				 struct scmi_xfers_info *info)
598{
599	int i;
600	struct scmi_xfer *xfer;
601	struct device *dev = sinfo->dev;
602	const struct scmi_desc *desc = sinfo->desc;
603
604	/* Pre-allocated messages, no more than what hdr.seq can support */
605	if (WARN_ON(desc->max_msg >= MSG_TOKEN_MAX)) {
606		dev_err(dev, "Maximum message of %d exceeds supported %ld\n",
607			desc->max_msg, MSG_TOKEN_MAX);
608		return -EINVAL;
609	}
610
611	info->xfer_block = devm_kcalloc(dev, desc->max_msg,
612					sizeof(*info->xfer_block), GFP_KERNEL);
613	if (!info->xfer_block)
614		return -ENOMEM;
615
616	info->xfer_alloc_table = devm_kcalloc(dev, BITS_TO_LONGS(desc->max_msg),
617					      sizeof(long), GFP_KERNEL);
618	if (!info->xfer_alloc_table)
619		return -ENOMEM;
620
621	/* Pre-initialize the buffer pointer to pre-allocated buffers */
622	for (i = 0, xfer = info->xfer_block; i < desc->max_msg; i++, xfer++) {
623		xfer->rx.buf = devm_kcalloc(dev, sizeof(u8), desc->max_msg_size,
624					    GFP_KERNEL);
625		if (!xfer->rx.buf)
626			return -ENOMEM;
627
628		xfer->tx.buf = xfer->rx.buf;
629		init_completion(&xfer->done);
630	}
631
632	spin_lock_init(&info->xfer_lock);
633
634	return 0;
635}
636
637static int scmi_xfer_info_init(struct scmi_info *sinfo)
638{
639	int ret = __scmi_xfer_info_init(sinfo, &sinfo->tx_minfo);
640
641	if (!ret && idr_find(&sinfo->rx_idr, SCMI_PROTOCOL_BASE))
642		ret = __scmi_xfer_info_init(sinfo, &sinfo->rx_minfo);
643
644	return ret;
645}
646
647static int scmi_chan_setup(struct scmi_info *info, struct device *dev,
648			   int prot_id, bool tx)
649{
650	int ret, idx;
651	struct scmi_chan_info *cinfo;
652	struct idr *idr;
653
654	/* Transmit channel is first entry i.e. index 0 */
655	idx = tx ? 0 : 1;
656	idr = tx ? &info->tx_idr : &info->rx_idr;
657
658	/* check if already allocated, used for multiple device per protocol */
659	cinfo = idr_find(idr, prot_id);
660	if (cinfo)
661		return 0;
662
663	if (!info->desc->ops->chan_available(dev, idx)) {
664		cinfo = idr_find(idr, SCMI_PROTOCOL_BASE);
665		if (unlikely(!cinfo)) /* Possible only if platform has no Rx */
666			return -EINVAL;
667		goto idr_alloc;
668	}
669
670	cinfo = devm_kzalloc(info->dev, sizeof(*cinfo), GFP_KERNEL);
671	if (!cinfo)
672		return -ENOMEM;
673
674	cinfo->dev = dev;
675
676	ret = info->desc->ops->chan_setup(cinfo, info->dev, tx);
677	if (ret)
678		return ret;
679
680idr_alloc:
681	ret = idr_alloc(idr, cinfo, prot_id, prot_id + 1, GFP_KERNEL);
682	if (ret != prot_id) {
683		dev_err(dev, "unable to allocate SCMI idr slot err %d\n", ret);
684		return ret;
685	}
686
687	cinfo->handle = &info->handle;
688	return 0;
689}
690
691static inline int
692scmi_txrx_setup(struct scmi_info *info, struct device *dev, int prot_id)
693{
694	int ret = scmi_chan_setup(info, dev, prot_id, true);
695
696	if (!ret) /* Rx is optional, hence no error check */
697		scmi_chan_setup(info, dev, prot_id, false);
698
699	return ret;
700}
701
702static inline void
703scmi_create_protocol_device(struct device_node *np, struct scmi_info *info,
704			    int prot_id, const char *name)
705{
706	struct scmi_device *sdev;
707
708	sdev = scmi_device_create(np, info->dev, prot_id, name);
709	if (!sdev) {
710		dev_err(info->dev, "failed to create %d protocol device\n",
711			prot_id);
712		return;
713	}
714
715	if (scmi_txrx_setup(info, &sdev->dev, prot_id)) {
716		dev_err(&sdev->dev, "failed to setup transport\n");
717		scmi_device_destroy(sdev);
718		return;
719	}
720
721	/* setup handle now as the transport is ready */
722	scmi_set_handle(sdev);
723}
724
725#define MAX_SCMI_DEV_PER_PROTOCOL	2
726struct scmi_prot_devnames {
727	int protocol_id;
728	char *names[MAX_SCMI_DEV_PER_PROTOCOL];
729};
730
731static struct scmi_prot_devnames devnames[] = {
732	{ SCMI_PROTOCOL_POWER,  { "genpd" },},
733	{ SCMI_PROTOCOL_PERF,   { "cpufreq" },},
734	{ SCMI_PROTOCOL_CLOCK,  { "clocks" },},
735	{ SCMI_PROTOCOL_SENSOR, { "hwmon" },},
736	{ SCMI_PROTOCOL_RESET,  { "reset" },},
737};
738
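Each entry in the table above maps a protocol id to the scmi_device name(s) created for it, and protocol drivers then match on those names through their id tables; up to MAX_SCMI_DEV_PER_PROTOCOL devices can be spawned per protocol. A hedged sketch of how an additional pairing would look (SCMI_PROTOCOL_VOLTAGE and the "regulator" name are not part of this v5.9 tree and are used purely as an illustration):

/* Illustration only: a hypothetical extra protocol/device-name pairing */
static struct scmi_prot_devnames example_devnames[] = {
	{ SCMI_PROTOCOL_VOLTAGE, { "regulator" },},
};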
739static inline void
740scmi_create_protocol_devices(struct device_node *np, struct scmi_info *info,
741			     int prot_id)
742{
743	int loop, cnt;
744
745	for (loop = 0; loop < ARRAY_SIZE(devnames); loop++) {
746		if (devnames[loop].protocol_id != prot_id)
747			continue;
748
749		for (cnt = 0; cnt < ARRAY_SIZE(devnames[loop].names); cnt++) {
750			const char *name = devnames[loop].names[cnt];
751
752			if (name)
753				scmi_create_protocol_device(np, info, prot_id,
754							    name);
755		}
756	}
757}
758
759static int scmi_probe(struct platform_device *pdev)
760{
761	int ret;
762	struct scmi_handle *handle;
763	const struct scmi_desc *desc;
764	struct scmi_info *info;
765	struct device *dev = &pdev->dev;
766	struct device_node *child, *np = dev->of_node;
767
768	desc = of_device_get_match_data(dev);
769	if (!desc)
770		return -EINVAL;
771
772	info = devm_kzalloc(dev, sizeof(*info), GFP_KERNEL);
773	if (!info)
774		return -ENOMEM;
775
776	info->dev = dev;
777	info->desc = desc;
778	INIT_LIST_HEAD(&info->node);
779
780	platform_set_drvdata(pdev, info);
781	idr_init(&info->tx_idr);
782	idr_init(&info->rx_idr);
783
784	handle = &info->handle;
785	handle->dev = info->dev;
786	handle->version = &info->version;
787
788	ret = scmi_txrx_setup(info, dev, SCMI_PROTOCOL_BASE);
789	if (ret)
790		return ret;
791
792	ret = scmi_xfer_info_init(info);
793	if (ret)
794		return ret;
795
796	if (scmi_notification_init(handle))
797		dev_err(dev, "SCMI Notifications NOT available.\n");
798
799	ret = scmi_base_protocol_init(handle);
800	if (ret) {
801		dev_err(dev, "unable to communicate with SCMI(%d)\n", ret);
802		return ret;
803	}
804
805	mutex_lock(&scmi_list_mutex);
806	list_add_tail(&info->node, &scmi_list);
807	mutex_unlock(&scmi_list_mutex);
808
809	for_each_available_child_of_node(np, child) {
810		u32 prot_id;
811
812		if (of_property_read_u32(child, "reg", &prot_id))
813			continue;
814
815		if (!FIELD_FIT(MSG_PROTOCOL_ID_MASK, prot_id))
816			dev_err(dev, "Out of range protocol %d\n", prot_id);
817
818		if (!scmi_is_protocol_implemented(handle, prot_id)) {
819			dev_err(dev, "SCMI protocol %d not implemented\n",
820				prot_id);
821			continue;
822		}
823
824		scmi_create_protocol_devices(child, info, prot_id);
825	}
826
827	return 0;
828}
829
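scmi_probe() walks the available child nodes of the SCMI firmware node and takes each child's reg property as the SCMI protocol id, so which protocol devices get created is driven entirely by the devicetree. An illustrative, simplified fragment of the expected shape follows (see the arm,scmi devicetree binding documentation for the authoritative form; the phandles and protocol ids are examples):

/*
 *	firmware {
 *		scmi {
 *			compatible = "arm,scmi";
 *			mboxes = <&mailbox 0>, <&mailbox 1>;
 *			shmem = <&cpu_scp_lpri>, <&cpu_scp_hpri>;
 *			#address-cells = <1>;
 *			#size-cells = <0>;
 *
 *			scmi_dvfs: protocol@13 {
 *				reg = <0x13>;	// performance protocol
 *			};
 *
 *			scmi_clk: protocol@14 {
 *				reg = <0x14>;	// clock protocol
 *			};
 *		};
 *	};
 */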
830void scmi_free_channel(struct scmi_chan_info *cinfo, struct idr *idr, int id)
831{
832	idr_remove(idr, id);
833}
834
835static int scmi_remove(struct platform_device *pdev)
836{
837	int ret = 0;
838	struct scmi_info *info = platform_get_drvdata(pdev);
839	struct idr *idr = &info->tx_idr;
840
841	scmi_notification_exit(&info->handle);
842
843	mutex_lock(&scmi_list_mutex);
844	if (info->users)
845		ret = -EBUSY;
846	else
847		list_del(&info->node);
848	mutex_unlock(&scmi_list_mutex);
849
850	if (ret)
851		return ret;
852
853	/* Safe to free channels since no more users */
854	ret = idr_for_each(idr, info->desc->ops->chan_free, idr);
855	idr_destroy(&info->tx_idr);
856
857	idr = &info->rx_idr;
858	ret = idr_for_each(idr, info->desc->ops->chan_free, idr);
859	idr_destroy(&info->rx_idr);
860
861	return ret;
862}
863
864static ssize_t protocol_version_show(struct device *dev,
865				     struct device_attribute *attr, char *buf)
866{
867	struct scmi_info *info = dev_get_drvdata(dev);
868
869	return sprintf(buf, "%u.%u\n", info->version.major_ver,
870		       info->version.minor_ver);
871}
872static DEVICE_ATTR_RO(protocol_version);
873
874static ssize_t firmware_version_show(struct device *dev,
875				     struct device_attribute *attr, char *buf)
876{
877	struct scmi_info *info = dev_get_drvdata(dev);
878
879	return sprintf(buf, "0x%x\n", info->version.impl_ver);
880}
881static DEVICE_ATTR_RO(firmware_version);
882
883static ssize_t vendor_id_show(struct device *dev,
884			      struct device_attribute *attr, char *buf)
885{
886	struct scmi_info *info = dev_get_drvdata(dev);
887
888	return sprintf(buf, "%s\n", info->version.vendor_id);
889}
890static DEVICE_ATTR_RO(vendor_id);
891
892static ssize_t sub_vendor_id_show(struct device *dev,
893				  struct device_attribute *attr, char *buf)
894{
895	struct scmi_info *info = dev_get_drvdata(dev);
896
897	return sprintf(buf, "%s\n", info->version.sub_vendor_id);
898}
899static DEVICE_ATTR_RO(sub_vendor_id);
900
901static struct attribute *versions_attrs[] = {
902	&dev_attr_firmware_version.attr,
903	&dev_attr_protocol_version.attr,
904	&dev_attr_vendor_id.attr,
905	&dev_attr_sub_vendor_id.attr,
906	NULL,
907};
908ATTRIBUTE_GROUPS(versions);
909
910/* Each compatible listed below must have descriptor associated with it */
911static const struct of_device_id scmi_of_match[] = {
912	{ .compatible = "arm,scmi", .data = &scmi_mailbox_desc },
913#ifdef CONFIG_HAVE_ARM_SMCCC_DISCOVERY
914	{ .compatible = "arm,scmi-smc", .data = &scmi_smc_desc},
915#endif
916	{ /* Sentinel */ },
917};
918
919MODULE_DEVICE_TABLE(of, scmi_of_match);
920
921static struct platform_driver scmi_driver = {
922	.driver = {
923		   .name = "arm-scmi",
924		   .of_match_table = scmi_of_match,
925		   .dev_groups = versions_groups,
926		   },
927	.probe = scmi_probe,
928	.remove = scmi_remove,
929};
930
931module_platform_driver(scmi_driver);
932
933MODULE_ALIAS("platform: arm-scmi");
934MODULE_AUTHOR("Sudeep Holla <sudeep.holla@arm.com>");
935MODULE_DESCRIPTION("ARM SCMI protocol driver");
936MODULE_LICENSE("GPL v2");
drivers/firmware/arm_scmi/driver.c (v5.14.15)
   1// SPDX-License-Identifier: GPL-2.0
   2/*
   3 * System Control and Management Interface (SCMI) Message Protocol driver
   4 *
   5 * SCMI Message Protocol is used between the System Control Processor(SCP)
   6 * and the Application Processors(AP). The Message Handling Unit(MHU)
   7 * provides a mechanism for inter-processor communication between SCP's
   8 * Cortex M3 and AP.
   9 *
  10 * SCP offers control and management of the core/cluster power states,
  11 * various power domain DVFS including the core/cluster, certain system
  12 * clocks configuration, thermal sensors and many others.
  13 *
  14 * Copyright (C) 2018-2021 ARM Ltd.
  15 */
  16
  17#include <linux/bitmap.h>
  18#include <linux/device.h>
  19#include <linux/export.h>
  20#include <linux/idr.h>
  21#include <linux/io.h>
  22#include <linux/kernel.h>
  23#include <linux/ktime.h>
  24#include <linux/list.h>
  25#include <linux/module.h>
  26#include <linux/of_address.h>
  27#include <linux/of_device.h>
  28#include <linux/processor.h>
  29#include <linux/refcount.h>
  30#include <linux/slab.h>
  31
  32#include "common.h"
  33#include "notify.h"
  34
  35#define CREATE_TRACE_POINTS
  36#include <trace/events/scmi.h>
  37
  38enum scmi_error_codes {
  39	SCMI_SUCCESS = 0,	/* Success */
  40	SCMI_ERR_SUPPORT = -1,	/* Not supported */
  41	SCMI_ERR_PARAMS = -2,	/* Invalid Parameters */
  42	SCMI_ERR_ACCESS = -3,	/* Invalid access/permission denied */
  43	SCMI_ERR_ENTRY = -4,	/* Not found */
  44	SCMI_ERR_RANGE = -5,	/* Value out of range */
  45	SCMI_ERR_BUSY = -6,	/* Device busy */
  46	SCMI_ERR_COMMS = -7,	/* Communication Error */
  47	SCMI_ERR_GENERIC = -8,	/* Generic Error */
  48	SCMI_ERR_HARDWARE = -9,	/* Hardware Error */
  49	SCMI_ERR_PROTOCOL = -10,/* Protocol Error */
  50};
  51
  52/* List of all SCMI devices active in system */
  53static LIST_HEAD(scmi_list);
  54/* Protection for the entire list */
  55static DEFINE_MUTEX(scmi_list_mutex);
  56/* Track the unique id for the transfers for debug & profiling purpose */
  57static atomic_t transfer_last_id;
  58
  59static DEFINE_IDR(scmi_requested_devices);
  60static DEFINE_MUTEX(scmi_requested_devices_mtx);
  61
  62struct scmi_requested_dev {
  63	const struct scmi_device_id *id_table;
  64	struct list_head node;
  65};
  66
  67/**
  68 * struct scmi_xfers_info - Structure to manage transfer information
  69 *
  70 * @xfer_block: Preallocated Message array
  71 * @xfer_alloc_table: Bitmap table for allocated messages.
  72 *	Index of this bitmap table is also used for message
  73 *	sequence identifier.
  74 * @xfer_lock: Protection for message allocation
  75 */
  76struct scmi_xfers_info {
  77	struct scmi_xfer *xfer_block;
  78	unsigned long *xfer_alloc_table;
  79	spinlock_t xfer_lock;
  80};
  81
  82/**
  83 * struct scmi_protocol_instance  - Describe an initialized protocol instance.
  84 * @handle: Reference to the SCMI handle associated to this protocol instance.
  85 * @proto: A reference to the protocol descriptor.
  86 * @gid: A reference for per-protocol devres management.
  87 * @users: A refcount to track effective users of this protocol.
  88 * @priv: Reference for optional protocol private data.
  89 * @ph: An embedded protocol handle that will be passed down to protocol
  90 *	initialization code to identify this instance.
  91 *
  92 * Each protocol is initialized independently once for each SCMI platform in
  93 * which is defined by DT and implemented by the SCMI server fw.
  94 */
  95struct scmi_protocol_instance {
  96	const struct scmi_handle	*handle;
  97	const struct scmi_protocol	*proto;
  98	void				*gid;
  99	refcount_t			users;
 100	void				*priv;
 101	struct scmi_protocol_handle	ph;
 102};
 103
 104#define ph_to_pi(h)	container_of(h, struct scmi_protocol_instance, ph)
 105
 106/**
 107 * struct scmi_info - Structure representing a SCMI instance
 108 *
 109 * @dev: Device pointer
 110 * @desc: SoC description for this instance
 111 * @version: SCMI revision information containing protocol version,
 112 *	implementation version and (sub-)vendor identification.
 113 * @handle: Instance of SCMI handle to send to clients
 114 * @tx_minfo: Universal Transmit Message management info
 115 * @rx_minfo: Universal Receive Message management info
 116 * @tx_idr: IDR object to map protocol id to Tx channel info pointer
 117 * @rx_idr: IDR object to map protocol id to Rx channel info pointer
 118 * @protocols: IDR for protocols' instance descriptors initialized for
 119 *	       this SCMI instance: populated on protocol's first attempted
 120 *	       usage.
 121 * @protocols_mtx: A mutex to protect protocols instances initialization.
 122 * @protocols_imp: List of protocols implemented, currently maximum of
 123 *	MAX_PROTOCOLS_IMP elements allocated by the base protocol
 124 * @active_protocols: IDR storing device_nodes for protocols actually defined
 125 *		      in the DT and confirmed as implemented by fw.
 126 * @notify_priv: Pointer to private data structure specific to notifications.
 127 * @node: List head
 128 * @users: Number of users of this instance
 129 */
 130struct scmi_info {
 131	struct device *dev;
 132	const struct scmi_desc *desc;
 133	struct scmi_revision_info version;
 134	struct scmi_handle handle;
 135	struct scmi_xfers_info tx_minfo;
 136	struct scmi_xfers_info rx_minfo;
 137	struct idr tx_idr;
 138	struct idr rx_idr;
 139	struct idr protocols;
 140	/* Ensure mutual exclusive access to protocols instance array */
 141	struct mutex protocols_mtx;
 142	u8 *protocols_imp;
 143	struct idr active_protocols;
 144	void *notify_priv;
 145	struct list_head node;
 146	int users;
 147};
 148
 149#define handle_to_scmi_info(h)	container_of(h, struct scmi_info, handle)
 150
 151static const int scmi_linux_errmap[] = {
 152	/* better than switch case as long as return value is continuous */
 153	0,			/* SCMI_SUCCESS */
 154	-EOPNOTSUPP,		/* SCMI_ERR_SUPPORT */
 155	-EINVAL,		/* SCMI_ERR_PARAM */
 156	-EACCES,		/* SCMI_ERR_ACCESS */
 157	-ENOENT,		/* SCMI_ERR_ENTRY */
 158	-ERANGE,		/* SCMI_ERR_RANGE */
 159	-EBUSY,			/* SCMI_ERR_BUSY */
 160	-ECOMM,			/* SCMI_ERR_COMMS */
 161	-EIO,			/* SCMI_ERR_GENERIC */
 162	-EREMOTEIO,		/* SCMI_ERR_HARDWARE */
 163	-EPROTO,		/* SCMI_ERR_PROTOCOL */
 164};
 165
 166static inline int scmi_to_linux_errno(int errno)
 167{
 168	int err_idx = -errno;
 169
 170	if (err_idx >= SCMI_SUCCESS && err_idx < ARRAY_SIZE(scmi_linux_errmap))
 171		return scmi_linux_errmap[err_idx];
 172	return -EIO;
 173}
 174
 175/**
 176 * scmi_dump_header_dbg() - Helper to dump a message header.
 177 *
 178 * @dev: Device pointer corresponding to the SCMI entity
 179 * @hdr: pointer to header.
 180 */
 181static inline void scmi_dump_header_dbg(struct device *dev,
 182					struct scmi_msg_hdr *hdr)
 183{
 184	dev_dbg(dev, "Message ID: %x Sequence ID: %x Protocol: %x\n",
 185		hdr->id, hdr->seq, hdr->protocol_id);
 186}
 187
 188void scmi_notification_instance_data_set(const struct scmi_handle *handle,
 189					 void *priv)
 190{
 191	struct scmi_info *info = handle_to_scmi_info(handle);
 192
 193	info->notify_priv = priv;
 194	/* Ensure updated protocol private date are visible */
 195	smp_wmb();
 196}
 197
 198void *scmi_notification_instance_data_get(const struct scmi_handle *handle)
 199{
 200	struct scmi_info *info = handle_to_scmi_info(handle);
 201
 202	/* Ensure protocols_private_data has been updated */
 203	smp_rmb();
 204	return info->notify_priv;
 205}
 206
 207/**
 208 * scmi_xfer_get() - Allocate one message
 209 *
 210 * @handle: Pointer to SCMI entity handle
 211 * @minfo: Pointer to Tx/Rx Message management info based on channel type
 212 *
 213 * Helper function which is used by various message functions that are
 214 * exposed to clients of this driver for allocating a message traffic event.
 215 *
 216 * This function can sleep depending on pending requests already in the system
 217 * for the SCMI entity. Further, this also holds a spinlock to maintain
 218 * integrity of internal data structures.
 219 *
 220 * Return: 0 if all went fine, else corresponding error.
 221 */
 222static struct scmi_xfer *scmi_xfer_get(const struct scmi_handle *handle,
 223				       struct scmi_xfers_info *minfo)
 224{
 225	u16 xfer_id;
 226	struct scmi_xfer *xfer;
 227	unsigned long flags, bit_pos;
 228	struct scmi_info *info = handle_to_scmi_info(handle);
 229
 230	/* Keep the locked section as small as possible */
 231	spin_lock_irqsave(&minfo->xfer_lock, flags);
 232	bit_pos = find_first_zero_bit(minfo->xfer_alloc_table,
 233				      info->desc->max_msg);
 234	if (bit_pos == info->desc->max_msg) {
 235		spin_unlock_irqrestore(&minfo->xfer_lock, flags);
 236		return ERR_PTR(-ENOMEM);
 237	}
 238	set_bit(bit_pos, minfo->xfer_alloc_table);
 239	spin_unlock_irqrestore(&minfo->xfer_lock, flags);
 240
 241	xfer_id = bit_pos;
 242
 243	xfer = &minfo->xfer_block[xfer_id];
 244	xfer->hdr.seq = xfer_id;
 245	xfer->transfer_id = atomic_inc_return(&transfer_last_id);
 246
 247	return xfer;
 248}
 249
 250/**
 251 * __scmi_xfer_put() - Release a message
 252 *
 253 * @minfo: Pointer to Tx/Rx Message management info based on channel type
 254 * @xfer: message that was reserved by scmi_xfer_get
 255 *
 256 * This holds a spinlock to maintain integrity of internal data structures.
 257 */
 258static void
 259__scmi_xfer_put(struct scmi_xfers_info *minfo, struct scmi_xfer *xfer)
 260{
 261	unsigned long flags;
 262
 263	/*
 264	 * Keep the locked section as small as possible
 265	 * NOTE: we might escape with smp_mb and no lock here..
 266	 * but just be conservative and symmetric.
 267	 */
 268	spin_lock_irqsave(&minfo->xfer_lock, flags);
 269	clear_bit(xfer->hdr.seq, minfo->xfer_alloc_table);
 270	spin_unlock_irqrestore(&minfo->xfer_lock, flags);
 271}
 272
 273static void scmi_handle_notification(struct scmi_chan_info *cinfo, u32 msg_hdr)
 274{
 275	struct scmi_xfer *xfer;
 276	struct device *dev = cinfo->dev;
 277	struct scmi_info *info = handle_to_scmi_info(cinfo->handle);
 278	struct scmi_xfers_info *minfo = &info->rx_minfo;
 279	ktime_t ts;
 280
 281	ts = ktime_get_boottime();
 282	xfer = scmi_xfer_get(cinfo->handle, minfo);
 283	if (IS_ERR(xfer)) {
 284		dev_err(dev, "failed to get free message slot (%ld)\n",
 285			PTR_ERR(xfer));
 286		info->desc->ops->clear_channel(cinfo);
 287		return;
 288	}
 289
 290	unpack_scmi_header(msg_hdr, &xfer->hdr);
 291	scmi_dump_header_dbg(dev, &xfer->hdr);
 292	info->desc->ops->fetch_notification(cinfo, info->desc->max_msg_size,
 293					    xfer);
 294	scmi_notify(cinfo->handle, xfer->hdr.protocol_id,
 295		    xfer->hdr.id, xfer->rx.buf, xfer->rx.len, ts);
 296
 297	trace_scmi_rx_done(xfer->transfer_id, xfer->hdr.id,
 298			   xfer->hdr.protocol_id, xfer->hdr.seq,
 299			   MSG_TYPE_NOTIFICATION);
 300
 301	__scmi_xfer_put(minfo, xfer);
 302
 303	info->desc->ops->clear_channel(cinfo);
 304}
 305
 306static void scmi_handle_response(struct scmi_chan_info *cinfo,
 307				 u16 xfer_id, u8 msg_type)
 308{
 309	struct scmi_xfer *xfer;
 310	struct device *dev = cinfo->dev;
 311	struct scmi_info *info = handle_to_scmi_info(cinfo->handle);
 312	struct scmi_xfers_info *minfo = &info->tx_minfo;
 313
 314	/* Are we even expecting this? */
 315	if (!test_bit(xfer_id, minfo->xfer_alloc_table)) {
 316		dev_err(dev, "message for %d is not expected!\n", xfer_id);
 317		info->desc->ops->clear_channel(cinfo);
 318		return;
 319	}
 320
 321	xfer = &minfo->xfer_block[xfer_id];
 322	/*
 323	 * Even if a response was indeed expected on this slot at this point,
 324	 * a buggy platform could wrongly reply feeding us an unexpected
 325	 * delayed response we're not prepared to handle: bail-out safely
 326	 * blaming firmware.
 327	 */
 328	if (unlikely(msg_type == MSG_TYPE_DELAYED_RESP && !xfer->async_done)) {
 329		dev_err(dev,
 330			"Delayed Response for %d not expected! Buggy F/W ?\n",
 331			xfer_id);
 332		info->desc->ops->clear_channel(cinfo);
 333		/* It was unexpected, so nobody will clear the xfer if not us */
 334		__scmi_xfer_put(minfo, xfer);
 335		return;
 336	}
 337
 338	/* rx.len could be shrunk in the sync do_xfer, so reset to maxsz */
 339	if (msg_type == MSG_TYPE_DELAYED_RESP)
 340		xfer->rx.len = info->desc->max_msg_size;
 341
 342	scmi_dump_header_dbg(dev, &xfer->hdr);
 343
 344	info->desc->ops->fetch_response(cinfo, xfer);
 345
 346	trace_scmi_rx_done(xfer->transfer_id, xfer->hdr.id,
 347			   xfer->hdr.protocol_id, xfer->hdr.seq,
 348			   msg_type);
 349
 350	if (msg_type == MSG_TYPE_DELAYED_RESP) {
 351		info->desc->ops->clear_channel(cinfo);
 352		complete(xfer->async_done);
 353	} else {
 354		complete(&xfer->done);
 355	}
 356}
 357
 358/**
 359 * scmi_rx_callback() - callback for receiving messages
 360 *
 361 * @cinfo: SCMI channel info
 362 * @msg_hdr: Message header
 363 *
 364 * Processes one received message to appropriate transfer information and
 365 * signals completion of the transfer.
 366 *
 367 * NOTE: This function will be invoked in IRQ context, hence should be
 368 * as optimal as possible.
 369 */
 370void scmi_rx_callback(struct scmi_chan_info *cinfo, u32 msg_hdr)
 371{
 372	u16 xfer_id = MSG_XTRACT_TOKEN(msg_hdr);
 373	u8 msg_type = MSG_XTRACT_TYPE(msg_hdr);
 374
 375	switch (msg_type) {
 376	case MSG_TYPE_NOTIFICATION:
 377		scmi_handle_notification(cinfo, msg_hdr);
 378		break;
 379	case MSG_TYPE_COMMAND:
 380	case MSG_TYPE_DELAYED_RESP:
 381		scmi_handle_response(cinfo, xfer_id, msg_type);
 382		break;
 383	default:
 384		WARN_ONCE(1, "received unknown msg_type:%d\n", msg_type);
 385		break;
 386	}
 387}
 388
 389/**
 390 * xfer_put() - Release a transmit message
 391 *
 392 * @ph: Pointer to SCMI protocol handle
 393 * @xfer: message that was reserved by scmi_xfer_get
 394 */
 395static void xfer_put(const struct scmi_protocol_handle *ph,
 396		     struct scmi_xfer *xfer)
 397{
 398	const struct scmi_protocol_instance *pi = ph_to_pi(ph);
 399	struct scmi_info *info = handle_to_scmi_info(pi->handle);
 400
 401	__scmi_xfer_put(&info->tx_minfo, xfer);
 402}
 403
 404#define SCMI_MAX_POLL_TO_NS	(100 * NSEC_PER_USEC)
 405
 406static bool scmi_xfer_done_no_timeout(struct scmi_chan_info *cinfo,
 407				      struct scmi_xfer *xfer, ktime_t stop)
 408{
 409	struct scmi_info *info = handle_to_scmi_info(cinfo->handle);
 410
 411	return info->desc->ops->poll_done(cinfo, xfer) ||
 412	       ktime_after(ktime_get(), stop);
 413}
 414
 415/**
 416 * do_xfer() - Do one transfer
 417 *
 418 * @ph: Pointer to SCMI protocol handle
 419 * @xfer: Transfer to initiate and wait for response
 420 *
 421 * Return: -ETIMEDOUT in case of no response, if transmit error,
 422 *	return corresponding error, else if all goes well,
 423 *	return 0.
 424 */
 425static int do_xfer(const struct scmi_protocol_handle *ph,
 426		   struct scmi_xfer *xfer)
 427{
 428	int ret;
 429	int timeout;
 430	const struct scmi_protocol_instance *pi = ph_to_pi(ph);
 431	struct scmi_info *info = handle_to_scmi_info(pi->handle);
 432	struct device *dev = info->dev;
 433	struct scmi_chan_info *cinfo;
 434
 435	/*
 436	 * Initialise protocol id now from protocol handle to avoid it being
 437	 * overridden by mistake (or malice) by the protocol code mangling with
 438	 * the scmi_xfer structure prior to this.
 439	 */
 440	xfer->hdr.protocol_id = pi->proto->id;
 441	reinit_completion(&xfer->done);
 442
 443	cinfo = idr_find(&info->tx_idr, xfer->hdr.protocol_id);
 444	if (unlikely(!cinfo))
 445		return -EINVAL;
 446
 447	trace_scmi_xfer_begin(xfer->transfer_id, xfer->hdr.id,
 448			      xfer->hdr.protocol_id, xfer->hdr.seq,
 449			      xfer->hdr.poll_completion);
 450
 451	ret = info->desc->ops->send_message(cinfo, xfer);
 452	if (ret < 0) {
 453		dev_dbg(dev, "Failed to send message %d\n", ret);
 454		return ret;
 455	}
 456
 457	if (xfer->hdr.poll_completion) {
 458		ktime_t stop = ktime_add_ns(ktime_get(), SCMI_MAX_POLL_TO_NS);
 459
 460		spin_until_cond(scmi_xfer_done_no_timeout(cinfo, xfer, stop));
 461
 462		if (ktime_before(ktime_get(), stop))
 463			info->desc->ops->fetch_response(cinfo, xfer);
 464		else
 465			ret = -ETIMEDOUT;
 466	} else {
 467		/* And we wait for the response. */
 468		timeout = msecs_to_jiffies(info->desc->max_rx_timeout_ms);
 469		if (!wait_for_completion_timeout(&xfer->done, timeout)) {
 470			dev_err(dev, "timed out in resp(caller: %pS)\n",
 471				(void *)_RET_IP_);
 472			ret = -ETIMEDOUT;
 473		}
 474	}
 475
 476	if (!ret && xfer->hdr.status)
 477		ret = scmi_to_linux_errno(xfer->hdr.status);
 478
 479	if (info->desc->ops->mark_txdone)
 480		info->desc->ops->mark_txdone(cinfo, ret);
 481
 482	trace_scmi_xfer_end(xfer->transfer_id, xfer->hdr.id,
 483			    xfer->hdr.protocol_id, xfer->hdr.seq, ret);
 484
 485	return ret;
 486}
 487
 488static void reset_rx_to_maxsz(const struct scmi_protocol_handle *ph,
 489			      struct scmi_xfer *xfer)
 490{
 491	const struct scmi_protocol_instance *pi = ph_to_pi(ph);
 492	struct scmi_info *info = handle_to_scmi_info(pi->handle);
 493
 494	xfer->rx.len = info->desc->max_msg_size;
 495}
 496
 497#define SCMI_MAX_RESPONSE_TIMEOUT	(2 * MSEC_PER_SEC)
 498
 499/**
 500 * do_xfer_with_response() - Do one transfer and wait until the delayed
 501 *	response is received
 502 *
 503 * @ph: Pointer to SCMI protocol handle
 504 * @xfer: Transfer to initiate and wait for response
 505 *
 506 * Return: -ETIMEDOUT in case of no delayed response, if transmit error,
 507 *	return corresponding error, else if all goes well, return 0.
 508 */
 509static int do_xfer_with_response(const struct scmi_protocol_handle *ph,
 510				 struct scmi_xfer *xfer)
 511{
 512	int ret, timeout = msecs_to_jiffies(SCMI_MAX_RESPONSE_TIMEOUT);
 513	DECLARE_COMPLETION_ONSTACK(async_response);
 514
 515	xfer->async_done = &async_response;
 516
 517	ret = do_xfer(ph, xfer);
 518	if (!ret) {
 519		if (!wait_for_completion_timeout(xfer->async_done, timeout))
 520			ret = -ETIMEDOUT;
 521		else if (xfer->hdr.status)
 522			ret = scmi_to_linux_errno(xfer->hdr.status);
 523	}
 524
 525	xfer->async_done = NULL;
 526	return ret;
 527}
 528
 529/**
 530 * xfer_get_init() - Allocate and initialise one message for transmit
 531 *
 532 * @ph: Pointer to SCMI protocol handle
 533 * @msg_id: Message identifier
 534 * @tx_size: transmit message size
 535 * @rx_size: receive message size
 536 * @p: pointer to the allocated and initialised message
 537 *
 538 * This function allocates the message using @scmi_xfer_get and
 539 * initialise the header.
 540 *
 541 * Return: 0 if all went fine with @p pointing to message, else
 542 *	corresponding error.
 543 */
 544static int xfer_get_init(const struct scmi_protocol_handle *ph,
 545			 u8 msg_id, size_t tx_size, size_t rx_size,
 546			 struct scmi_xfer **p)
 547{
 548	int ret;
 549	struct scmi_xfer *xfer;
 550	const struct scmi_protocol_instance *pi = ph_to_pi(ph);
 551	struct scmi_info *info = handle_to_scmi_info(pi->handle);
 552	struct scmi_xfers_info *minfo = &info->tx_minfo;
 553	struct device *dev = info->dev;
 554
 555	/* Ensure we have sane transfer sizes */
 556	if (rx_size > info->desc->max_msg_size ||
 557	    tx_size > info->desc->max_msg_size)
 558		return -ERANGE;
 559
 560	xfer = scmi_xfer_get(pi->handle, minfo);
 561	if (IS_ERR(xfer)) {
 562		ret = PTR_ERR(xfer);
 563		dev_err(dev, "failed to get free message slot(%d)\n", ret);
 564		return ret;
 565	}
 566
 567	xfer->tx.len = tx_size;
 568	xfer->rx.len = rx_size ? : info->desc->max_msg_size;
 569	xfer->hdr.id = msg_id;
 570	xfer->hdr.poll_completion = false;
 571
 572	*p = xfer;
 573
 574	return 0;
 575}
 576
 577/**
 578 * version_get() - command to get the revision of the SCMI entity
 579 *
 580 * @ph: Pointer to SCMI protocol handle
 581 * @version: Holds returned version of protocol.
 582 *
 583 * Updates the SCMI information in the internal data structure.
 584 *
 585 * Return: 0 if all went fine, else return appropriate error.
 586 */
 587static int version_get(const struct scmi_protocol_handle *ph, u32 *version)
 588{
 589	int ret;
 590	__le32 *rev_info;
 591	struct scmi_xfer *t;
 592
 593	ret = xfer_get_init(ph, PROTOCOL_VERSION, 0, sizeof(*version), &t);
 594	if (ret)
 595		return ret;
 596
 597	ret = do_xfer(ph, t);
 598	if (!ret) {
 599		rev_info = t->rx.buf;
 600		*version = le32_to_cpu(*rev_info);
 601	}
 602
 603	xfer_put(ph, t);
 604	return ret;
 605}
 606
 607/**
 608 * scmi_set_protocol_priv  - Set protocol specific data at init time
 609 *
 610 * @ph: A reference to the protocol handle.
 611 * @priv: The private data to set.
 612 *
 613 * Return: 0 on Success
 614 */
 615static int scmi_set_protocol_priv(const struct scmi_protocol_handle *ph,
 616				  void *priv)
 617{
 618	struct scmi_protocol_instance *pi = ph_to_pi(ph);
 619
 620	pi->priv = priv;
 621
 622	return 0;
 623}
 624
 625/**
 626 * scmi_get_protocol_priv  - Set protocol specific data at init time
 627 *
 628 * @ph: A reference to the protocol handle.
 629 *
 630 * Return: Protocol private data if any was set.
 631 */
 632static void *scmi_get_protocol_priv(const struct scmi_protocol_handle *ph)
 633{
 634	const struct scmi_protocol_instance *pi = ph_to_pi(ph);
 635
 636	return pi->priv;
 637}
 638
 639static const struct scmi_xfer_ops xfer_ops = {
 640	.version_get = version_get,
 641	.xfer_get_init = xfer_get_init,
 642	.reset_rx_to_maxsz = reset_rx_to_maxsz,
 643	.do_xfer = do_xfer,
 644	.do_xfer_with_response = do_xfer_with_response,
 645	.xfer_put = xfer_put,
 646};
 647
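These xfer_ops are handed to protocol implementations through their scmi_protocol_handle, so protocol code in v5.14 no longer calls into the driver core directly. A minimal sketch of a protocol's instance_init using them (the protocol name and private struct are illustrative):

struct example_proto_info {
	u32 version;
};

static int example_protocol_init(const struct scmi_protocol_handle *ph)
{
	int ret;
	u32 version;
	struct example_proto_info *pinfo;

	ret = ph->xops->version_get(ph, &version);
	if (ret)
		return ret;

	dev_dbg(ph->dev, "Example protocol version 0x%x\n", version);

	pinfo = devm_kzalloc(ph->dev, sizeof(*pinfo), GFP_KERNEL);
	if (!pinfo)
		return -ENOMEM;

	pinfo->version = version;
	/* Stash protocol private data for use in later protocol ops */
	return ph->set_priv(ph, pinfo);
}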
 648/**
 649 * scmi_revision_area_get  - Retrieve version memory area.
 650 *
 651 * @ph: A reference to the protocol handle.
 652 *
 653 * A helper to grab the version memory area reference during SCMI Base protocol
 654 * initialization.
 655 *
 656 * Return: A reference to the version memory area associated to the SCMI
 657 *	   instance underlying this protocol handle.
 658 */
 659struct scmi_revision_info *
 660scmi_revision_area_get(const struct scmi_protocol_handle *ph)
 661{
 662	const struct scmi_protocol_instance *pi = ph_to_pi(ph);
 663
 664	return pi->handle->version;
 665}
 666
 667/**
 668 * scmi_alloc_init_protocol_instance  - Allocate and initialize a protocol
 669 * instance descriptor.
 670 * @info: The reference to the related SCMI instance.
 671 * @proto: The protocol descriptor.
 672 *
 673 * Allocate a new protocol instance descriptor, using the provided @proto
 674 * description, against the specified SCMI instance @info, and initialize it;
 675 * all resources management is handled via a dedicated per-protocol devres
 676 * group.
 677 *
 678 * Context: Assumes to be called with @protocols_mtx already acquired.
 679 * Return: A reference to a freshly allocated and initialized protocol instance
 680 *	   or ERR_PTR on failure. On failure the @proto reference is at first
 681 *	   put using @scmi_protocol_put() before releasing all the devres group.
 682 */
 683static struct scmi_protocol_instance *
 684scmi_alloc_init_protocol_instance(struct scmi_info *info,
 685				  const struct scmi_protocol *proto)
 686{
 687	int ret = -ENOMEM;
 688	void *gid;
 689	struct scmi_protocol_instance *pi;
 690	const struct scmi_handle *handle = &info->handle;
 691
 692	/* Protocol specific devres group */
 693	gid = devres_open_group(handle->dev, NULL, GFP_KERNEL);
 694	if (!gid) {
 695		scmi_protocol_put(proto->id);
 696		goto out;
 697	}
 698
 699	pi = devm_kzalloc(handle->dev, sizeof(*pi), GFP_KERNEL);
 700	if (!pi)
 701		goto clean;
 702
 703	pi->gid = gid;
 704	pi->proto = proto;
 705	pi->handle = handle;
 706	pi->ph.dev = handle->dev;
 707	pi->ph.xops = &xfer_ops;
 708	pi->ph.set_priv = scmi_set_protocol_priv;
 709	pi->ph.get_priv = scmi_get_protocol_priv;
 710	refcount_set(&pi->users, 1);
 711	/* proto->init is assured NON NULL by scmi_protocol_register */
 712	ret = pi->proto->instance_init(&pi->ph);
 713	if (ret)
 714		goto clean;
 715
 716	ret = idr_alloc(&info->protocols, pi, proto->id, proto->id + 1,
 717			GFP_KERNEL);
 718	if (ret != proto->id)
 719		goto clean;
 720
 721	/*
 722	 * Warn but ignore events registration errors since we do not want
 723	 * to skip whole protocols if their notifications are messed up.
 724	 */
 725	if (pi->proto->events) {
 726		ret = scmi_register_protocol_events(handle, pi->proto->id,
 727						    &pi->ph,
 728						    pi->proto->events);
 729		if (ret)
 730			dev_warn(handle->dev,
 731				 "Protocol:%X - Events Registration Failed - err:%d\n",
 732				 pi->proto->id, ret);
 733	}
 734
 735	devres_close_group(handle->dev, pi->gid);
 736	dev_dbg(handle->dev, "Initialized protocol: 0x%X\n", pi->proto->id);
 737
 738	return pi;
 739
 740clean:
 741	/* Take care to put the protocol module's owner before releasing all */
 742	scmi_protocol_put(proto->id);
 743	devres_release_group(handle->dev, gid);
 744out:
 745	return ERR_PTR(ret);
 746}
 747
 748/**
 749 * scmi_get_protocol_instance  - Protocol initialization helper.
 750 * @handle: A reference to the SCMI platform instance.
 751 * @protocol_id: The protocol being requested.
 752 *
 753 * In case the required protocol has never been requested before for this
 754 * instance, allocate and initialize all the needed structures while handling
 755 * resource allocation with a dedicated per-protocol devres subgroup.
 756 *
 757 * Return: A reference to an initialized protocol instance or error on failure:
 758 *	   in particular returns -EPROBE_DEFER when the desired protocol could
 759 *	   NOT be found.
 760 */
 761static struct scmi_protocol_instance * __must_check
 762scmi_get_protocol_instance(const struct scmi_handle *handle, u8 protocol_id)
 763{
 764	struct scmi_protocol_instance *pi;
 765	struct scmi_info *info = handle_to_scmi_info(handle);
 766
 767	mutex_lock(&info->protocols_mtx);
 768	pi = idr_find(&info->protocols, protocol_id);
 769
 770	if (pi) {
 771		refcount_inc(&pi->users);
 772	} else {
 773		const struct scmi_protocol *proto;
 774
 775		/* Fails if protocol not registered on bus */
 776		proto = scmi_protocol_get(protocol_id);
 777		if (proto)
 778			pi = scmi_alloc_init_protocol_instance(info, proto);
 779		else
 780			pi = ERR_PTR(-EPROBE_DEFER);
 781	}
 782	mutex_unlock(&info->protocols_mtx);
 783
 784	return pi;
 785}
 786
 787/**
 788 * scmi_protocol_acquire  - Protocol acquire
 789 * @handle: A reference to the SCMI platform instance.
 790 * @protocol_id: The protocol being requested.
 791 *
 792 * Register a new user for the requested protocol on the specified SCMI
 793 * platform instance, possibly triggering its initialization on first user.
 794 *
 795 * Return: 0 if protocol was acquired successfully.
 796 */
 797int scmi_protocol_acquire(const struct scmi_handle *handle, u8 protocol_id)
 798{
 799	return PTR_ERR_OR_ZERO(scmi_get_protocol_instance(handle, protocol_id));
 800}
 801
 802/**
 803 * scmi_protocol_release  - Protocol de-initialization helper.
 804 * @handle: A reference to the SCMI platform instance.
 805 * @protocol_id: The protocol being requested.
 806 *
 807 * Remove one user for the specified protocol and triggers de-initialization
 808 * and resources de-allocation once the last user has gone.
 809 */
 810void scmi_protocol_release(const struct scmi_handle *handle, u8 protocol_id)
 811{
 812	struct scmi_info *info = handle_to_scmi_info(handle);
 813	struct scmi_protocol_instance *pi;
 814
 815	mutex_lock(&info->protocols_mtx);
 816	pi = idr_find(&info->protocols, protocol_id);
 817	if (WARN_ON(!pi))
 818		goto out;
 819
 820	if (refcount_dec_and_test(&pi->users)) {
 821		void *gid = pi->gid;
 822
 823		if (pi->proto->events)
 824			scmi_deregister_protocol_events(handle, protocol_id);
 825
 826		if (pi->proto->instance_deinit)
 827			pi->proto->instance_deinit(&pi->ph);
 828
 829		idr_remove(&info->protocols, protocol_id);
 830
 831		scmi_protocol_put(protocol_id);
 832
 833		devres_release_group(handle->dev, gid);
 834		dev_dbg(handle->dev, "De-Initialized protocol: 0x%X\n",
 835			protocol_id);
 836	}
 837
 838out:
 839	mutex_unlock(&info->protocols_mtx);
 840}
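/*
 * Illustrative sketch (not part of this driver): how a core-side user is
 * expected to pair the two helpers above. The function name
 * scmi_example_use_protocol() and the choice of SCMI_PROTOCOL_PERF are
 * hypothetical; the point is that every successful scmi_protocol_acquire()
 * must be balanced by one scmi_protocol_release() on the same handle/protocol
 * pair, so the per-protocol devres group can be torn down on last user.
 *
 *	static int scmi_example_use_protocol(const struct scmi_handle *handle)
 *	{
 *		int ret;
 *
 *		ret = scmi_protocol_acquire(handle, SCMI_PROTOCOL_PERF);
 *		if (ret)
 *			return ret;	// -EPROBE_DEFER if not yet registered
 *
 *		// ... use the protocol through its registered users ...
 *
 *		scmi_protocol_release(handle, SCMI_PROTOCOL_PERF);
 *		return 0;
 *	}
 */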
 841
 842void scmi_setup_protocol_implemented(const struct scmi_protocol_handle *ph,
 843				     u8 *prot_imp)
 844{
 845	const struct scmi_protocol_instance *pi = ph_to_pi(ph);
 846	struct scmi_info *info = handle_to_scmi_info(pi->handle);
 847
 848	info->protocols_imp = prot_imp;
 849}
 850
 851static bool
 852scmi_is_protocol_implemented(const struct scmi_handle *handle, u8 prot_id)
 853{
 854	int i;
 855	struct scmi_info *info = handle_to_scmi_info(handle);
 856
 857	if (!info->protocols_imp)
 858		return false;
 859
 860	for (i = 0; i < MAX_PROTOCOLS_IMP; i++)
 861		if (info->protocols_imp[i] == prot_id)
 862			return true;
 863	return false;
 864}
 865
 866struct scmi_protocol_devres {
 867	const struct scmi_handle *handle;
 868	u8 protocol_id;
 869};
 870
 871static void scmi_devm_release_protocol(struct device *dev, void *res)
 872{
 873	struct scmi_protocol_devres *dres = res;
 874
 875	scmi_protocol_release(dres->handle, dres->protocol_id);
 876}
 877
 878/**
 879 * scmi_devm_protocol_get  - Devres managed get protocol operations and handle
 880 * @sdev: A reference to an scmi_device whose embedded struct device is to
 881 *	  be used for devres accounting.
 882 * @protocol_id: The protocol being requested.
 883 * @ph: A pointer reference used to pass back the associated protocol handle.
 884 *
 885 * Get hold of a protocol, accounting for its usage and possibly triggering
 886 * its initialization, and return the protocol-specific operations and the
 887 * related protocol handle which will be used as the first argument in most
 888 * of the protocol operations methods.
 889 * Being a devres-based managed method, the protocol hold will be
 890 * automatically released, and the protocol possibly de-initialized on last
 891 * user, once the SCMI driver owning the scmi_device is unbound from it.
 892 *
 893 * Return: A reference to the requested protocol operations or error.
 894 *	   Must be checked for errors by caller.
 895 */
 896static const void __must_check *
 897scmi_devm_protocol_get(struct scmi_device *sdev, u8 protocol_id,
 898		       struct scmi_protocol_handle **ph)
 899{
 900	struct scmi_protocol_instance *pi;
 901	struct scmi_protocol_devres *dres;
 902	struct scmi_handle *handle = sdev->handle;
 903
 904	if (!ph)
 905		return ERR_PTR(-EINVAL);
 906
 907	dres = devres_alloc(scmi_devm_release_protocol,
 908			    sizeof(*dres), GFP_KERNEL);
 909	if (!dres)
 910		return ERR_PTR(-ENOMEM);
 911
 912	pi = scmi_get_protocol_instance(handle, protocol_id);
 913	if (IS_ERR(pi)) {
 914		devres_free(dres);
 915		return pi;
 916	}
 917
 918	dres->handle = handle;
 919	dres->protocol_id = protocol_id;
 920	devres_add(&sdev->dev, dres);
 921
 922	*ph = &pi->ph;
 923
 924	return pi->proto->ops;
 925}
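/*
 * Illustrative sketch (not part of this file): how an SCMI driver typically
 * consumes the devres-managed getter above through the handle ops; this
 * mirrors the pattern of in-tree users such as drivers/clk/clk-scmi.c. The
 * names scmi_example_probe() and example_ops are hypothetical, and the use
 * of struct scmi_clk_proto_ops assumes the clock protocol ops type exported
 * by <linux/scmi_protocol.h>.
 *
 *	#include <linux/scmi_protocol.h>
 *
 *	static int scmi_example_probe(struct scmi_device *sdev)
 *	{
 *		const struct scmi_clk_proto_ops *example_ops;
 *		struct scmi_protocol_handle *ph;
 *		const struct scmi_handle *handle = sdev->handle;
 *
 *		if (!handle)
 *			return -ENODEV;
 *
 *		example_ops = handle->devm_protocol_get(sdev,
 *							SCMI_PROTOCOL_CLOCK,
 *							&ph);
 *		if (IS_ERR(example_ops))
 *			return PTR_ERR(example_ops);
 *
 *		// example_ops and ph stay valid until the driver is unbound;
 *		// the protocol hold is then dropped automatically via devres.
 *		return 0;
 *	}
 */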
 926
 927static int scmi_devm_protocol_match(struct device *dev, void *res, void *data)
 928{
 929	struct scmi_protocol_devres *dres = res;
 930
 931	if (WARN_ON(!dres || !data))
 932		return 0;
 933
 934	return dres->protocol_id == *((u8 *)data);
 935}
 936
 937/**
 938 * scmi_devm_protocol_put  - Devres managed put protocol operations and handle
 939 * @sdev: A reference to an scmi_device whose embedded struct device is to
 940 *	  be used for devres accounting.
 941 * @protocol_id: The protocol being requested.
 942 *
 943 * Explicitly release a protocol hold previously obtained by calling the
 944 * above @scmi_devm_protocol_get.
 945 */
 946static void scmi_devm_protocol_put(struct scmi_device *sdev, u8 protocol_id)
 947{
 948	int ret;
 949
 950	ret = devres_release(&sdev->dev, scmi_devm_release_protocol,
 951			     scmi_devm_protocol_match, &protocol_id);
 952	WARN_ON(ret);
 953}
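/*
 * Illustrative note: a driver that needs to drop its protocol hold before
 * being unbound can do so explicitly through the handle ops, passing the
 * same protocol_id used at get time, e.g.:
 *
 *	handle->devm_protocol_put(sdev, SCMI_PROTOCOL_CLOCK);
 *
 * This is optional; otherwise the hold is released automatically by devres
 * when the owning driver is unbound.
 */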
 954
 955static inline
 956struct scmi_handle *scmi_handle_get_from_info_unlocked(struct scmi_info *info)
 957{
 958	info->users++;
 959	return &info->handle;
 960}
 961
 962/**
 963 * scmi_handle_get() - Get the SCMI handle for a device
 964 *
 965 * @dev: pointer to device for which we want SCMI handle
 966 *
 967 * NOTE: The function does not track individual clients of the framework;
 968 * such tracking is expected to be maintained by the caller of the SCMI
 969 * protocol library. scmi_handle_put must balance each successful scmi_handle_get.
 970 *
 971 * Return: pointer to handle if successful, NULL on error
 972 */
 973struct scmi_handle *scmi_handle_get(struct device *dev)
 974{
 975	struct list_head *p;
 976	struct scmi_info *info;
 977	struct scmi_handle *handle = NULL;
 978
 979	mutex_lock(&scmi_list_mutex);
 980	list_for_each(p, &scmi_list) {
 981		info = list_entry(p, struct scmi_info, node);
 982		if (dev->parent == info->dev) {
 983			handle = scmi_handle_get_from_info_unlocked(info);
 984			break;
 985		}
 986	}
 987	mutex_unlock(&scmi_list_mutex);
 988
 989	return handle;
 990}
 991
 992/**
 993 * scmi_handle_put() - Release the handle acquired by scmi_handle_get
 994 *
 995 * @handle: handle acquired by scmi_handle_get
 996 *
 997 * NOTE: The function does not track individual clients of the framework;
 998 * such tracking is expected to be maintained by the caller of the SCMI
 999 * protocol library. scmi_handle_put must balance each successful scmi_handle_get.
1000 *
1001 * Return: 0 if successfully released,
1002 *	-EINVAL if a NULL handle was passed.
1003 */
1004int scmi_handle_put(const struct scmi_handle *handle)
1005{
1006	struct scmi_info *info;
1007
1008	if (!handle)
1009		return -EINVAL;
1010
1011	info = handle_to_scmi_info(handle);
1012	mutex_lock(&scmi_list_mutex);
1013	if (!WARN_ON(!info->users))
1014		info->users--;
1015	mutex_unlock(&scmi_list_mutex);
1016
1017	return 0;
1018}
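/*
 * Illustrative sketch: the typical balanced usage of the two helpers above,
 * as done by the SCMI bus code when binding/unbinding an scmi_device. The
 * function names below are hypothetical; the point is that every non-NULL
 * return from scmi_handle_get() must eventually be matched by one
 * scmi_handle_put() on the same handle.
 *
 *	static int example_bind(struct scmi_device *scmi_dev)
 *	{
 *		scmi_dev->handle = scmi_handle_get(&scmi_dev->dev);
 *
 *		return scmi_dev->handle ? 0 : -EPROBE_DEFER;
 *	}
 *
 *	static void example_unbind(struct scmi_device *scmi_dev)
 *	{
 *		scmi_handle_put(scmi_dev->handle);
 *		scmi_dev->handle = NULL;
 *	}
 */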
1019
1020static int __scmi_xfer_info_init(struct scmi_info *sinfo,
1021				 struct scmi_xfers_info *info)
1022{
1023	int i;
1024	struct scmi_xfer *xfer;
1025	struct device *dev = sinfo->dev;
1026	const struct scmi_desc *desc = sinfo->desc;
1027
1028	/* Pre-allocated messages, no more than what hdr.seq can support */
1029	if (WARN_ON(!desc->max_msg || desc->max_msg > MSG_TOKEN_MAX)) {
1030		dev_err(dev,
1031			"Invalid maximum messages %d, not in range [1 - %lu]\n",
1032			desc->max_msg, MSG_TOKEN_MAX);
1033		return -EINVAL;
1034	}
1035
1036	info->xfer_block = devm_kcalloc(dev, desc->max_msg,
1037					sizeof(*info->xfer_block), GFP_KERNEL);
1038	if (!info->xfer_block)
1039		return -ENOMEM;
1040
1041	info->xfer_alloc_table = devm_kcalloc(dev, BITS_TO_LONGS(desc->max_msg),
1042					      sizeof(long), GFP_KERNEL);
1043	if (!info->xfer_alloc_table)
1044		return -ENOMEM;
1045
1046	/* Pre-initialize the buffer pointer to pre-allocated buffers */
1047	for (i = 0, xfer = info->xfer_block; i < desc->max_msg; i++, xfer++) {
1048		xfer->rx.buf = devm_kcalloc(dev, desc->max_msg_size, sizeof(u8),
1049					    GFP_KERNEL);
1050		if (!xfer->rx.buf)
1051			return -ENOMEM;
1052
1053		xfer->tx.buf = xfer->rx.buf;
1054		init_completion(&xfer->done);
1055	}
1056
1057	spin_lock_init(&info->xfer_lock);
1058
1059	return 0;
1060}
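/*
 * Illustrative sketch: the desc->max_msg and desc->max_msg_size values
 * consumed above come from the transport descriptor matched at probe time.
 * The example below is modeled on the mailbox transport descriptor; the
 * names example_desc/example_transport_ops and the numeric values are
 * indicative only, the authoritative ones live in the transport code.
 *
 *	static const struct scmi_desc example_desc = {
 *		.ops = &example_transport_ops,
 *		.max_rx_timeout_ms = 30,
 *		.max_msg = 20,		// must not exceed MSG_TOKEN_MAX (hdr.seq)
 *		.max_msg_size = 128,	// per-transfer payload buffer size
 *	};
 */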
1061
1062static int scmi_xfer_info_init(struct scmi_info *sinfo)
1063{
1064	int ret = __scmi_xfer_info_init(sinfo, &sinfo->tx_minfo);
1065
1066	if (!ret && idr_find(&sinfo->rx_idr, SCMI_PROTOCOL_BASE))
1067		ret = __scmi_xfer_info_init(sinfo, &sinfo->rx_minfo);
1068
1069	return ret;
1070}
1071
1072static int scmi_chan_setup(struct scmi_info *info, struct device *dev,
1073			   int prot_id, bool tx)
1074{
1075	int ret, idx;
1076	struct scmi_chan_info *cinfo;
1077	struct idr *idr;
1078
1079	/* Transmit channel is first entry i.e. index 0 */
1080	idx = tx ? 0 : 1;
1081	idr = tx ? &info->tx_idr : &info->rx_idr;
1082
1083	/* check if already allocated, used for multiple devices per protocol */
1084	cinfo = idr_find(idr, prot_id);
1085	if (cinfo)
1086		return 0;
1087
1088	if (!info->desc->ops->chan_available(dev, idx)) {
1089		cinfo = idr_find(idr, SCMI_PROTOCOL_BASE);
1090		if (unlikely(!cinfo)) /* Possible only if platform has no Rx */
1091			return -EINVAL;
1092		goto idr_alloc;
1093	}
1094
1095	cinfo = devm_kzalloc(info->dev, sizeof(*cinfo), GFP_KERNEL);
1096	if (!cinfo)
1097		return -ENOMEM;
1098
1099	cinfo->dev = dev;
1100
1101	ret = info->desc->ops->chan_setup(cinfo, info->dev, tx);
1102	if (ret)
1103		return ret;
1104
1105idr_alloc:
1106	ret = idr_alloc(idr, cinfo, prot_id, prot_id + 1, GFP_KERNEL);
1107	if (ret != prot_id) {
1108		dev_err(dev, "unable to allocate SCMI idr slot err %d\n", ret);
1109		return ret;
1110	}
1111
1112	cinfo->handle = &info->handle;
1113	return 0;
1114}
1115
1116static inline int
1117scmi_txrx_setup(struct scmi_info *info, struct device *dev, int prot_id)
1118{
1119	int ret = scmi_chan_setup(info, dev, prot_id, true);
1120
1121	if (!ret) /* Rx is optional, hence no error check */
1122		scmi_chan_setup(info, dev, prot_id, false);
1123
1124	return ret;
1125}
1126
1127/**
1128 * scmi_get_protocol_device  - Helper to get/create an SCMI device.
1129 *
1130 * @np: A device node representing a valid active protocol for the referred
1131 * SCMI instance.
1132 * @info: The referred SCMI instance for which we are getting/creating this
1133 * device.
1134 * @prot_id: The protocol ID.
1135 * @name: The device name.
1136 *
1137 * Referring to the specific SCMI instance identified by @info, this helper
1138 * takes care to return a properly initialized device matching the requested
1139 * @prot_id and @name: if the device does not exist yet, it is created as a
1140 * child of the specified SCMI instance @info and its transport is properly
1141 * initialized as usual.
1142 *
1143 * Return: A properly initialized scmi device, NULL otherwise.
1144 */
1145static inline struct scmi_device *
1146scmi_get_protocol_device(struct device_node *np, struct scmi_info *info,
1147			 int prot_id, const char *name)
1148{
1149	struct scmi_device *sdev;
1150
1151	/* Already created for this parent SCMI instance ? */
1152	sdev = scmi_child_dev_find(info->dev, prot_id, name);
1153	if (sdev)
1154		return sdev;
1155
1156	pr_debug("Creating SCMI device (%s) for protocol %x\n", name, prot_id);
1157
1158	sdev = scmi_device_create(np, info->dev, prot_id, name);
1159	if (!sdev) {
1160		dev_err(info->dev, "failed to create %d protocol device\n",
1161			prot_id);
1162		return NULL;
1163	}
1164
1165	if (scmi_txrx_setup(info, &sdev->dev, prot_id)) {
1166		dev_err(&sdev->dev, "failed to setup transport\n");
1167		scmi_device_destroy(sdev);
1168		return NULL;
1169	}
1170
1171	return sdev;
1172}
1173
1174static inline void
1175scmi_create_protocol_device(struct device_node *np, struct scmi_info *info,
1176			    int prot_id, const char *name)
1177{
1178	struct scmi_device *sdev;
1179
1180	sdev = scmi_get_protocol_device(np, info, prot_id, name);
1181	if (!sdev)
1182		return;
1183
1184	/* setup handle now as the transport is ready */
1185	scmi_set_handle(sdev);
1186}
1187
1188/**
1189 * scmi_create_protocol_devices  - Create devices for all pending requests for
1190 * this SCMI instance.
1191 *
1192 * @np: The device node describing the protocol
1193 * @info: The SCMI instance descriptor
1194 * @prot_id: The protocol ID
1195 *
1196 * All devices previously requested for this instance (if any) are found and
1197 * created by scanning the proper @&scmi_requested_devices entry.
1198 */
1199static void scmi_create_protocol_devices(struct device_node *np,
1200					 struct scmi_info *info, int prot_id)
1201{
1202	struct list_head *phead;
1203
1204	mutex_lock(&scmi_requested_devices_mtx);
1205	phead = idr_find(&scmi_requested_devices, prot_id);
1206	if (phead) {
1207		struct scmi_requested_dev *rdev;
1208
1209		list_for_each_entry(rdev, phead, node)
1210			scmi_create_protocol_device(np, info, prot_id,
1211						    rdev->id_table->name);
1212	}
1213	mutex_unlock(&scmi_requested_devices_mtx);
1214}
1215
1216/**
1217 * scmi_protocol_device_request  - Helper to request a device
1218 *
1219 * @id_table: A protocol/name pair descriptor for the device to be created.
1220 *
1221 * This helper lets an SCMI driver request that specific devices, identified
1222 * by @id_table, be created for each active SCMI instance.
1223 *
1224 * The requested device name MUST NOT already exist for any protocol;
1225 * at first the freshly requested @id_table is annotated in the IDR table
1226 * @scmi_requested_devices, then a matching device is created for each
1227 * already active SCMI instance (if any).
1228 *
1229 * This way the requested device is created straight away for all the already
1230 * initialized (probed) SCMI instances (handles), and it also remains annotated
1231 * as pending creation if the requesting SCMI driver was loaded before some
1232 * SCMI instance and its related transport were available: when such a late
1233 * instance is probed, its probe will take care to scan the list of pending
1234 * requested devices and create them on its own (see
1235 * @scmi_create_protocol_devices and its enclosing loop).
1236 *
1237 * Return: 0 on Success
1238 */
1239int scmi_protocol_device_request(const struct scmi_device_id *id_table)
1240{
1241	int ret = 0;
1242	unsigned int id = 0;
1243	struct list_head *head, *phead = NULL;
1244	struct scmi_requested_dev *rdev;
1245	struct scmi_info *info;
1246
1247	pr_debug("Requesting SCMI device (%s) for protocol %x\n",
1248		 id_table->name, id_table->protocol_id);
1249
1250	/*
1251	 * Search for the matching protocol rdev list and then search it for
1252	 * any existing equally named device... fail if any duplicate is found.
1253	 */
1254	mutex_lock(&scmi_requested_devices_mtx);
1255	idr_for_each_entry(&scmi_requested_devices, head, id) {
1256		if (!phead) {
1257			/* A list found registered in the IDR is never empty */
1258			rdev = list_first_entry(head, struct scmi_requested_dev,
1259						node);
1260			if (rdev->id_table->protocol_id ==
1261			    id_table->protocol_id)
1262				phead = head;
1263		}
1264		list_for_each_entry(rdev, head, node) {
1265			if (!strcmp(rdev->id_table->name, id_table->name)) {
1266				pr_err("Ignoring duplicate request [%d] %s\n",
1267				       rdev->id_table->protocol_id,
1268				       rdev->id_table->name);
1269				ret = -EINVAL;
1270				goto out;
1271			}
1272		}
1273	}
1274
1275	/*
1276	 * No duplicate found for requested id_table, so let's create a new
1277	 * requested device entry for this new valid request.
1278	 */
1279	rdev = kzalloc(sizeof(*rdev), GFP_KERNEL);
1280	if (!rdev) {
1281		ret = -ENOMEM;
1282		goto out;
1283	}
1284	rdev->id_table = id_table;
1285
1286	/*
1287	 * Append the new requested device table descriptor to the head of the
1288	 * related protocol list, creating such a head if not already
1289	 * there.
1290	 */
1291	if (!phead) {
1292		phead = kzalloc(sizeof(*phead), GFP_KERNEL);
1293		if (!phead) {
1294			kfree(rdev);
1295			ret = -ENOMEM;
1296			goto out;
1297		}
1298		INIT_LIST_HEAD(phead);
1299
1300		ret = idr_alloc(&scmi_requested_devices, (void *)phead,
1301				id_table->protocol_id,
1302				id_table->protocol_id + 1, GFP_KERNEL);
1303		if (ret != id_table->protocol_id) {
1304			pr_err("Failed to save SCMI device - ret:%d\n", ret);
1305			kfree(rdev);
1306			kfree(phead);
1307			ret = -EINVAL;
1308			goto out;
1309		}
1310		ret = 0;
1311	}
1312	list_add(&rdev->node, phead);
1313
1314	/*
1315	 * Now effectively create and initialize the requested device for every
1316	 * already initialized SCMI instance which has registered the requested
1317	 * protocol as a valid active one: i.e. defined in DT and supported by
1318	 * current platform FW.
1319	 */
1320	mutex_lock(&scmi_list_mutex);
1321	list_for_each_entry(info, &scmi_list, node) {
1322		struct device_node *child;
1323
1324		child = idr_find(&info->active_protocols,
1325				 id_table->protocol_id);
1326		if (child) {
1327			struct scmi_device *sdev;
1328
1329			sdev = scmi_get_protocol_device(child, info,
1330							id_table->protocol_id,
1331							id_table->name);
1332			/* Set handle if not already set: device existed */
1333			if (sdev && !sdev->handle)
1334				sdev->handle =
1335					scmi_handle_get_from_info_unlocked(info);
1336		} else {
1337			dev_err(info->dev,
1338				"Failed. SCMI protocol %d not active.\n",
1339				id_table->protocol_id);
1340		}
1341	}
1342	mutex_unlock(&scmi_list_mutex);
1343
1344out:
1345	mutex_unlock(&scmi_requested_devices_mtx);
1346
1347	return ret;
1348}
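/*
 * Illustrative sketch: requests normally reach the helper above through
 * scmi_driver_register() in the SCMI bus code, fed by the id_table of an
 * SCMI driver. A hypothetical driver asking for a "clocks" device bound to
 * the clock protocol would look roughly like (example_* names and
 * scmi_example_probe are placeholders):
 *
 *	static const struct scmi_device_id example_id_table[] = {
 *		{ SCMI_PROTOCOL_CLOCK, "clocks" },
 *		{ },
 *	};
 *	MODULE_DEVICE_TABLE(scmi, example_id_table);
 *
 *	static struct scmi_driver example_driver = {
 *		.name = "scmi-example",
 *		.probe = scmi_example_probe,
 *		.id_table = example_id_table,
 *	};
 *	module_scmi_driver(example_driver);
 */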
1349
1350/**
1351 * scmi_protocol_device_unrequest  - Helper to unrequest a device
1352 *
1353 * @id_table: A protocol/name pair descriptor for the device to be unrequested.
1354 *
1355 * A helper to let an SCMI driver release its request for devices; note that
1356 * devices are created and initialized once the first SCMI driver requests them,
1357 * but they are destroyed only on SCMI core unloading/unbinding.
1358 *
1359 * The current SCMI transport layer uses such devices as internal references,
1360 * and as such they could be shared between multiple drivers using the same
1361 * transport, so they cannot be safely destroyed until the whole SCMI stack is
1362 * removed (unless the further burden of refcounting is added).
1363 */
1364void scmi_protocol_device_unrequest(const struct scmi_device_id *id_table)
1365{
1366	struct list_head *phead;
1367
1368	pr_debug("Unrequesting SCMI device (%s) for protocol %x\n",
1369		 id_table->name, id_table->protocol_id);
1370
1371	mutex_lock(&scmi_requested_devices_mtx);
1372	phead = idr_find(&scmi_requested_devices, id_table->protocol_id);
1373	if (phead) {
1374		struct scmi_requested_dev *victim, *tmp;
1375
1376		list_for_each_entry_safe(victim, tmp, phead, node) {
1377			if (!strcmp(victim->id_table->name, id_table->name)) {
1378				list_del(&victim->node);
1379				kfree(victim);
1380				break;
1381			}
1382		}
1383
1384		if (list_empty(phead)) {
1385			idr_remove(&scmi_requested_devices,
1386				   id_table->protocol_id);
1387			kfree(phead);
1388		}
1389	}
1390	mutex_unlock(&scmi_requested_devices_mtx);
1391}
1392
1393static int scmi_probe(struct platform_device *pdev)
1394{
1395	int ret;
1396	struct scmi_handle *handle;
1397	const struct scmi_desc *desc;
1398	struct scmi_info *info;
1399	struct device *dev = &pdev->dev;
1400	struct device_node *child, *np = dev->of_node;
1401
1402	desc = of_device_get_match_data(dev);
1403	if (!desc)
1404		return -EINVAL;
1405
1406	info = devm_kzalloc(dev, sizeof(*info), GFP_KERNEL);
1407	if (!info)
1408		return -ENOMEM;
1409
1410	info->dev = dev;
1411	info->desc = desc;
1412	INIT_LIST_HEAD(&info->node);
1413	idr_init(&info->protocols);
1414	mutex_init(&info->protocols_mtx);
1415	idr_init(&info->active_protocols);
1416
1417	platform_set_drvdata(pdev, info);
1418	idr_init(&info->tx_idr);
1419	idr_init(&info->rx_idr);
1420
1421	handle = &info->handle;
1422	handle->dev = info->dev;
1423	handle->version = &info->version;
1424	handle->devm_protocol_get = scmi_devm_protocol_get;
1425	handle->devm_protocol_put = scmi_devm_protocol_put;
1426
1427	ret = scmi_txrx_setup(info, dev, SCMI_PROTOCOL_BASE);
1428	if (ret)
1429		return ret;
1430
1431	ret = scmi_xfer_info_init(info);
1432	if (ret)
1433		return ret;
1434
1435	if (scmi_notification_init(handle))
1436		dev_err(dev, "SCMI Notifications NOT available.\n");
1437
1438	/*
1439	 * Trigger SCMI Base protocol initialization.
1440	 * It's mandatory and won't ever be released/de-initialized until the
1441	 * SCMI stack is shut down/unloaded as a whole.
1442	 */
1443	ret = scmi_protocol_acquire(handle, SCMI_PROTOCOL_BASE);
1444	if (ret) {
1445		dev_err(dev, "unable to communicate with SCMI\n");
1446		return ret;
1447	}
1448
1449	mutex_lock(&scmi_list_mutex);
1450	list_add_tail(&info->node, &scmi_list);
1451	mutex_unlock(&scmi_list_mutex);
1452
1453	for_each_available_child_of_node(np, child) {
1454		u32 prot_id;
1455
1456		if (of_property_read_u32(child, "reg", &prot_id))
1457			continue;
1458
1459		if (!FIELD_FIT(MSG_PROTOCOL_ID_MASK, prot_id))
1460			dev_err(dev, "Out of range protocol %d\n", prot_id);
1461
1462		if (!scmi_is_protocol_implemented(handle, prot_id)) {
1463			dev_err(dev, "SCMI protocol %d not implemented\n",
1464				prot_id);
1465			continue;
1466		}
1467
1468		/*
1469		 * Save this valid DT protocol descriptor amongst
1470		 * @active_protocols for this SCMI instance.
1471		 */
1472		ret = idr_alloc(&info->active_protocols, child,
1473				prot_id, prot_id + 1, GFP_KERNEL);
1474		if (ret != prot_id) {
1475			dev_err(dev, "SCMI protocol %d already activated. Skip\n",
1476				prot_id);
1477			continue;
1478		}
1479
1480		of_node_get(child);
1481		scmi_create_protocol_devices(child, info, prot_id);
1482	}
1483
1484	return 0;
1485}
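/*
 * Illustrative sketch: the child-node scan above expects a devicetree layout
 * along the lines of the arm,scmi binding, where each child node's "reg"
 * holds the SCMI protocol identifier. Labels, phandles and the choice of a
 * mailbox transport below are placeholders:
 *
 *	firmware {
 *		scmi {
 *			compatible = "arm,scmi";
 *			mboxes = <&mailbox 0>, <&mailbox 1>;
 *			mbox-names = "tx", "rx";
 *			shmem = <&cpu_scp_lpri>, <&cpu_scp_hpri>;
 *			#address-cells = <1>;
 *			#size-cells = <0>;
 *
 *			scmi_clk: protocol@14 {
 *				reg = <0x14>;	// SCMI_PROTOCOL_CLOCK
 *				#clock-cells = <1>;
 *			};
 *		};
 *	};
 */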
1486
1487void scmi_free_channel(struct scmi_chan_info *cinfo, struct idr *idr, int id)
1488{
1489	idr_remove(idr, id);
1490}
1491
1492static int scmi_remove(struct platform_device *pdev)
1493{
1494	int ret = 0, id;
1495	struct scmi_info *info = platform_get_drvdata(pdev);
1496	struct idr *idr = &info->tx_idr;
1497	struct device_node *child;
1498
1499	mutex_lock(&scmi_list_mutex);
1500	if (info->users)
1501		ret = -EBUSY;
1502	else
1503		list_del(&info->node);
1504	mutex_unlock(&scmi_list_mutex);
1505
1506	if (ret)
1507		return ret;
1508
1509	scmi_notification_exit(&info->handle);
1510
1511	mutex_lock(&info->protocols_mtx);
1512	idr_destroy(&info->protocols);
1513	mutex_unlock(&info->protocols_mtx);
1514
1515	idr_for_each_entry(&info->active_protocols, child, id)
1516		of_node_put(child);
1517	idr_destroy(&info->active_protocols);
1518
1519	/* Safe to free channels since no more users */
1520	ret = idr_for_each(idr, info->desc->ops->chan_free, idr);
1521	idr_destroy(&info->tx_idr);
1522
1523	idr = &info->rx_idr;
1524	ret = idr_for_each(idr, info->desc->ops->chan_free, idr);
1525	idr_destroy(&info->rx_idr);
1526
1527	return ret;
1528}
1529
1530static ssize_t protocol_version_show(struct device *dev,
1531				     struct device_attribute *attr, char *buf)
1532{
1533	struct scmi_info *info = dev_get_drvdata(dev);
1534
1535	return sprintf(buf, "%u.%u\n", info->version.major_ver,
1536		       info->version.minor_ver);
1537}
1538static DEVICE_ATTR_RO(protocol_version);
1539
1540static ssize_t firmware_version_show(struct device *dev,
1541				     struct device_attribute *attr, char *buf)
1542{
1543	struct scmi_info *info = dev_get_drvdata(dev);
1544
1545	return sprintf(buf, "0x%x\n", info->version.impl_ver);
1546}
1547static DEVICE_ATTR_RO(firmware_version);
1548
1549static ssize_t vendor_id_show(struct device *dev,
1550			      struct device_attribute *attr, char *buf)
1551{
1552	struct scmi_info *info = dev_get_drvdata(dev);
1553
1554	return sprintf(buf, "%s\n", info->version.vendor_id);
1555}
1556static DEVICE_ATTR_RO(vendor_id);
1557
1558static ssize_t sub_vendor_id_show(struct device *dev,
1559				  struct device_attribute *attr, char *buf)
1560{
1561	struct scmi_info *info = dev_get_drvdata(dev);
1562
1563	return sprintf(buf, "%s\n", info->version.sub_vendor_id);
1564}
1565static DEVICE_ATTR_RO(sub_vendor_id);
1566
1567static struct attribute *versions_attrs[] = {
1568	&dev_attr_firmware_version.attr,
1569	&dev_attr_protocol_version.attr,
1570	&dev_attr_vendor_id.attr,
1571	&dev_attr_sub_vendor_id.attr,
1572	NULL,
1573};
1574ATTRIBUTE_GROUPS(versions);
1575
1576/* Each compatible listed below must have a descriptor associated with it */
1577static const struct of_device_id scmi_of_match[] = {
1578#ifdef CONFIG_MAILBOX
1579	{ .compatible = "arm,scmi", .data = &scmi_mailbox_desc },
1580#endif
1581#ifdef CONFIG_HAVE_ARM_SMCCC_DISCOVERY
1582	{ .compatible = "arm,scmi-smc", .data = &scmi_smc_desc},
1583#endif
1584	{ /* Sentinel */ },
1585};
1586
1587MODULE_DEVICE_TABLE(of, scmi_of_match);
1588
1589static struct platform_driver scmi_driver = {
1590	.driver = {
1591		   .name = "arm-scmi",
1592		   .of_match_table = scmi_of_match,
1593		   .dev_groups = versions_groups,
1594		   },
1595	.probe = scmi_probe,
1596	.remove = scmi_remove,
1597};
1598
1599static int __init scmi_driver_init(void)
1600{
1601	scmi_bus_init();
1602
1603	scmi_base_register();
1604
1605	scmi_clock_register();
1606	scmi_perf_register();
1607	scmi_power_register();
1608	scmi_reset_register();
1609	scmi_sensors_register();
1610	scmi_voltage_register();
1611	scmi_system_register();
1612
1613	return platform_driver_register(&scmi_driver);
1614}
1615subsys_initcall(scmi_driver_init);
1616
1617static void __exit scmi_driver_exit(void)
1618{
1619	scmi_base_unregister();
1620
1621	scmi_clock_unregister();
1622	scmi_perf_unregister();
1623	scmi_power_unregister();
1624	scmi_reset_unregister();
1625	scmi_sensors_unregister();
1626	scmi_voltage_unregister();
1627	scmi_system_unregister();
1628
1629	scmi_bus_exit();
1630
1631	platform_driver_unregister(&scmi_driver);
1632}
1633module_exit(scmi_driver_exit);
1634
1635MODULE_ALIAS("platform:arm-scmi");
1636MODULE_AUTHOR("Sudeep Holla <sudeep.holla@arm.com>");
1637MODULE_DESCRIPTION("ARM SCMI protocol driver");
1638MODULE_LICENSE("GPL v2");