v5.9
  1// SPDX-License-Identifier: GPL-2.0
  2/*
  3 * System Control and Management Interface (SCMI) Message Protocol driver
  4 *
  5 * SCMI Message Protocol is used between the System Control Processor(SCP)
  6 * and the Application Processors(AP). The Message Handling Unit(MHU)
  7 * provides a mechanism for inter-processor communication between SCP's
  8 * Cortex M3 and AP.
  9 *
 10 * SCP offers control and management of the core/cluster power states,
 11 * various power domain DVFS including the core/cluster, certain system
 12 * clocks configuration, thermal sensors and many others.
 13 *
 14 * Copyright (C) 2018 ARM Ltd.
 15 */
 16
 17#include <linux/bitmap.h>
 18#include <linux/export.h>
 19#include <linux/io.h>
 20#include <linux/kernel.h>
 21#include <linux/ktime.h>
 22#include <linux/module.h>
 23#include <linux/of_address.h>
 24#include <linux/of_device.h>
 25#include <linux/processor.h>
 26#include <linux/slab.h>
 27
 28#include "common.h"
 29#include "notify.h"
 30
 31#define CREATE_TRACE_POINTS
 32#include <trace/events/scmi.h>
 33
 34enum scmi_error_codes {
 35	SCMI_SUCCESS = 0,	/* Success */
 36	SCMI_ERR_SUPPORT = -1,	/* Not supported */
 37	SCMI_ERR_PARAMS = -2,	/* Invalid Parameters */
 38	SCMI_ERR_ACCESS = -3,	/* Invalid access/permission denied */
 39	SCMI_ERR_ENTRY = -4,	/* Not found */
 40	SCMI_ERR_RANGE = -5,	/* Value out of range */
 41	SCMI_ERR_BUSY = -6,	/* Device busy */
 42	SCMI_ERR_COMMS = -7,	/* Communication Error */
 43	SCMI_ERR_GENERIC = -8,	/* Generic Error */
 44	SCMI_ERR_HARDWARE = -9,	/* Hardware Error */
 45	SCMI_ERR_PROTOCOL = -10,/* Protocol Error */
 46	SCMI_ERR_MAX
 47};
 48
 49/* List of all SCMI devices active in system */
 50static LIST_HEAD(scmi_list);
 51/* Protection for the entire list */
 52static DEFINE_MUTEX(scmi_list_mutex);
 53/* Track the unique id for the transfers for debug & profiling purpose */
 54static atomic_t transfer_last_id;
 55
 56/**
 57 * struct scmi_xfers_info - Structure to manage transfer information
 58 *
 59 * @xfer_block: Preallocated Message array
 60 * @xfer_alloc_table: Bitmap table for allocated messages.
 61 *	Index of this bitmap table is also used for message
 62 *	sequence identifier.
 63 * @xfer_lock: Protection for message allocation
 64 */
 65struct scmi_xfers_info {
 66	struct scmi_xfer *xfer_block;
 67	unsigned long *xfer_alloc_table;
 68	spinlock_t xfer_lock;
 69};
 70
 71/**
 72 * struct scmi_info - Structure representing a SCMI instance
 73 *
 74 * @dev: Device pointer
 75 * @desc: SoC description for this instance
 76 * @version: SCMI revision information containing protocol version,
 77 *	implementation version and (sub-)vendor identification.
 78 * @handle: Instance of SCMI handle to send to clients
 79 * @tx_minfo: Universal Transmit Message management info
 80 * @rx_minfo: Universal Receive Message management info
 81 * @tx_idr: IDR object to map protocol id to Tx channel info pointer
 82 * @rx_idr: IDR object to map protocol id to Rx channel info pointer
 83 * @protocols_imp: List of protocols implemented, currently maximum of
 84 *	MAX_PROTOCOLS_IMP elements allocated by the base protocol
 85 * @node: List head
 86 * @users: Number of users of this instance
 87 */
 88struct scmi_info {
 89	struct device *dev;
 90	const struct scmi_desc *desc;
 91	struct scmi_revision_info version;
 92	struct scmi_handle handle;
 93	struct scmi_xfers_info tx_minfo;
 94	struct scmi_xfers_info rx_minfo;
 95	struct idr tx_idr;
 96	struct idr rx_idr;
 97	u8 *protocols_imp;
 98	struct list_head node;
 99	int users;
100};
101
102#define handle_to_scmi_info(h)	container_of(h, struct scmi_info, handle)
103
104static const int scmi_linux_errmap[] = {
105	/* better than switch case as long as return value is continuous */
106	0,			/* SCMI_SUCCESS */
107	-EOPNOTSUPP,		/* SCMI_ERR_SUPPORT */
108	-EINVAL,		/* SCMI_ERR_PARAM */
109	-EACCES,		/* SCMI_ERR_ACCESS */
110	-ENOENT,		/* SCMI_ERR_ENTRY */
111	-ERANGE,		/* SCMI_ERR_RANGE */
112	-EBUSY,			/* SCMI_ERR_BUSY */
113	-ECOMM,			/* SCMI_ERR_COMMS */
114	-EIO,			/* SCMI_ERR_GENERIC */
115	-EREMOTEIO,		/* SCMI_ERR_HARDWARE */
116	-EPROTO,		/* SCMI_ERR_PROTOCOL */
117};
118
119static inline int scmi_to_linux_errno(int errno)
120{
121	if (errno < SCMI_SUCCESS && errno > SCMI_ERR_MAX)
122		return scmi_linux_errmap[-errno];
123	return -EIO;
124}
125
126/**
127 * scmi_dump_header_dbg() - Helper to dump a message header.
128 *
129 * @dev: Device pointer corresponding to the SCMI entity
130 * @hdr: pointer to header.
131 */
132static inline void scmi_dump_header_dbg(struct device *dev,
133					struct scmi_msg_hdr *hdr)
134{
135	dev_dbg(dev, "Message ID: %x Sequence ID: %x Protocol: %x\n",
136		hdr->id, hdr->seq, hdr->protocol_id);
137}
138
139/**
140 * scmi_xfer_get() - Allocate one message
141 *
142 * @handle: Pointer to SCMI entity handle
143 * @minfo: Pointer to Tx/Rx Message management info based on channel type
144 *
145 * Helper function which is used by various message functions that are
146 * exposed to clients of this driver for allocating a message traffic event.
147 *
148 * This function can sleep depending on pending requests already in the system
149 * for the SCMI entity. Further, this also holds a spinlock to maintain
150 * integrity of internal data structures.
151 *
152 * Return: pointer to the allocated xfer on success, else an ERR_PTR() encoded error.
153 */
154static struct scmi_xfer *scmi_xfer_get(const struct scmi_handle *handle,
155				       struct scmi_xfers_info *minfo)
156{
157	u16 xfer_id;
158	struct scmi_xfer *xfer;
159	unsigned long flags, bit_pos;
160	struct scmi_info *info = handle_to_scmi_info(handle);
161
162	/* Keep the locked section as small as possible */
163	spin_lock_irqsave(&minfo->xfer_lock, flags);
164	bit_pos = find_first_zero_bit(minfo->xfer_alloc_table,
165				      info->desc->max_msg);
166	if (bit_pos == info->desc->max_msg) {
167		spin_unlock_irqrestore(&minfo->xfer_lock, flags);
168		return ERR_PTR(-ENOMEM);
169	}
170	set_bit(bit_pos, minfo->xfer_alloc_table);
171	spin_unlock_irqrestore(&minfo->xfer_lock, flags);
172
173	xfer_id = bit_pos;
174
175	xfer = &minfo->xfer_block[xfer_id];
176	xfer->hdr.seq = xfer_id;
177	reinit_completion(&xfer->done);
178	xfer->transfer_id = atomic_inc_return(&transfer_last_id);
179
180	return xfer;
181}
182
183/**
184 * __scmi_xfer_put() - Release a message
185 *
186 * @minfo: Pointer to Tx/Rx Message management info based on channel type
187 * @xfer: message that was reserved by scmi_xfer_get
188 *
189 * This holds a spinlock to maintain integrity of internal data structures.
190 */
191static void
192__scmi_xfer_put(struct scmi_xfers_info *minfo, struct scmi_xfer *xfer)
193{
194	unsigned long flags;
195
196	/*
197	 * Keep the locked section as small as possible
198	 * NOTE: we might escape with smp_mb and no lock here..
199	 * but just be conservative and symmetric.
200	 */
201	spin_lock_irqsave(&minfo->xfer_lock, flags);
202	clear_bit(xfer->hdr.seq, minfo->xfer_alloc_table);
203	spin_unlock_irqrestore(&minfo->xfer_lock, flags);
204}
205
206static void scmi_handle_notification(struct scmi_chan_info *cinfo, u32 msg_hdr)
207{
208	struct scmi_xfer *xfer;
209	struct device *dev = cinfo->dev;
210	struct scmi_info *info = handle_to_scmi_info(cinfo->handle);
211	struct scmi_xfers_info *minfo = &info->rx_minfo;
212	ktime_t ts;
213
214	ts = ktime_get_boottime();
215	xfer = scmi_xfer_get(cinfo->handle, minfo);
216	if (IS_ERR(xfer)) {
217		dev_err(dev, "failed to get free message slot (%ld)\n",
218			PTR_ERR(xfer));
219		info->desc->ops->clear_channel(cinfo);
220		return;
221	}
222
223	unpack_scmi_header(msg_hdr, &xfer->hdr);
224	scmi_dump_header_dbg(dev, &xfer->hdr);
225	info->desc->ops->fetch_notification(cinfo, info->desc->max_msg_size,
226					    xfer);
227	scmi_notify(cinfo->handle, xfer->hdr.protocol_id,
228		    xfer->hdr.id, xfer->rx.buf, xfer->rx.len, ts);
229
230	trace_scmi_rx_done(xfer->transfer_id, xfer->hdr.id,
231			   xfer->hdr.protocol_id, xfer->hdr.seq,
232			   MSG_TYPE_NOTIFICATION);
233
234	__scmi_xfer_put(minfo, xfer);
235
236	info->desc->ops->clear_channel(cinfo);
237}
238
239static void scmi_handle_response(struct scmi_chan_info *cinfo,
240				 u16 xfer_id, u8 msg_type)
241{
242	struct scmi_xfer *xfer;
243	struct device *dev = cinfo->dev;
244	struct scmi_info *info = handle_to_scmi_info(cinfo->handle);
245	struct scmi_xfers_info *minfo = &info->tx_minfo;
246
247	/* Are we even expecting this? */
248	if (!test_bit(xfer_id, minfo->xfer_alloc_table)) {
249		dev_err(dev, "message for %d is not expected!\n", xfer_id);
250		info->desc->ops->clear_channel(cinfo);
251		return;
252	}
253
254	xfer = &minfo->xfer_block[xfer_id];
255	/*
256	 * Even if a response was indeed expected on this slot at this point,
257	 * a buggy platform could wrongly reply feeding us an unexpected
258	 * delayed response we're not prepared to handle: bail-out safely
259	 * blaming firmware.
260	 */
261	if (unlikely(msg_type == MSG_TYPE_DELAYED_RESP && !xfer->async_done)) {
262		dev_err(dev,
263			"Delayed Response for %d not expected! Buggy F/W ?\n",
264			xfer_id);
265		info->desc->ops->clear_channel(cinfo);
266		/* It was unexpected, so nobody will clear the xfer if not us */
267		__scmi_xfer_put(minfo, xfer);
268		return;
269	}
270
271	scmi_dump_header_dbg(dev, &xfer->hdr);
272
273	info->desc->ops->fetch_response(cinfo, xfer);
274
275	trace_scmi_rx_done(xfer->transfer_id, xfer->hdr.id,
276			   xfer->hdr.protocol_id, xfer->hdr.seq,
277			   msg_type);
278
279	if (msg_type == MSG_TYPE_DELAYED_RESP) {
280		info->desc->ops->clear_channel(cinfo);
281		complete(xfer->async_done);
282	} else {
283		complete(&xfer->done);
284	}
285}
286
287/**
288 * scmi_rx_callback() - callback for receiving messages
289 *
290 * @cinfo: SCMI channel info
291 * @msg_hdr: Message header
292 *
293 * Processes one received message to appropriate transfer information and
294 * signals completion of the transfer.
295 *
296 * NOTE: This function will be invoked in IRQ context, hence should be
297 * as optimal as possible.
298 */
299void scmi_rx_callback(struct scmi_chan_info *cinfo, u32 msg_hdr)
300{
301	u16 xfer_id = MSG_XTRACT_TOKEN(msg_hdr);
302	u8 msg_type = MSG_XTRACT_TYPE(msg_hdr);
303
304	switch (msg_type) {
305	case MSG_TYPE_NOTIFICATION:
306		scmi_handle_notification(cinfo, msg_hdr);
307		break;
308	case MSG_TYPE_COMMAND:
309	case MSG_TYPE_DELAYED_RESP:
310		scmi_handle_response(cinfo, xfer_id, msg_type);
311		break;
312	default:
313		WARN_ONCE(1, "received unknown msg_type:%d\n", msg_type);
314		break;
315	}
316}
317
318/**
319 * scmi_xfer_put() - Release a transmit message
320 *
321 * @handle: Pointer to SCMI entity handle
322 * @xfer: message that was reserved by scmi_xfer_get
323 */
324void scmi_xfer_put(const struct scmi_handle *handle, struct scmi_xfer *xfer)
325{
326	struct scmi_info *info = handle_to_scmi_info(handle);
327
328	__scmi_xfer_put(&info->tx_minfo, xfer);
329}
330
331#define SCMI_MAX_POLL_TO_NS	(100 * NSEC_PER_USEC)
332
333static bool scmi_xfer_done_no_timeout(struct scmi_chan_info *cinfo,
334				      struct scmi_xfer *xfer, ktime_t stop)
335{
336	struct scmi_info *info = handle_to_scmi_info(cinfo->handle);
337
338	return info->desc->ops->poll_done(cinfo, xfer) ||
339	       ktime_after(ktime_get(), stop);
340}
341
342/**
343 * scmi_do_xfer() - Do one transfer
344 *
345 * @handle: Pointer to SCMI entity handle
346 * @xfer: Transfer to initiate and wait for response
347 *
348 * Return: -ETIMEDOUT in case of no response, if transmit error,
349 *	return corresponding error, else if all goes well,
350 *	return 0.
351 */
352int scmi_do_xfer(const struct scmi_handle *handle, struct scmi_xfer *xfer)
353{
354	int ret;
355	int timeout;
356	struct scmi_info *info = handle_to_scmi_info(handle);
357	struct device *dev = info->dev;
358	struct scmi_chan_info *cinfo;
359
360	cinfo = idr_find(&info->tx_idr, xfer->hdr.protocol_id);
361	if (unlikely(!cinfo))
362		return -EINVAL;
363
364	trace_scmi_xfer_begin(xfer->transfer_id, xfer->hdr.id,
365			      xfer->hdr.protocol_id, xfer->hdr.seq,
366			      xfer->hdr.poll_completion);
367
368	ret = info->desc->ops->send_message(cinfo, xfer);
369	if (ret < 0) {
370		dev_dbg(dev, "Failed to send message %d\n", ret);
371		return ret;
372	}
373
374	if (xfer->hdr.poll_completion) {
375		ktime_t stop = ktime_add_ns(ktime_get(), SCMI_MAX_POLL_TO_NS);
376
377		spin_until_cond(scmi_xfer_done_no_timeout(cinfo, xfer, stop));
378
379		if (ktime_before(ktime_get(), stop))
380			info->desc->ops->fetch_response(cinfo, xfer);
381		else
382			ret = -ETIMEDOUT;
383	} else {
384		/* And we wait for the response. */
385		timeout = msecs_to_jiffies(info->desc->max_rx_timeout_ms);
386		if (!wait_for_completion_timeout(&xfer->done, timeout)) {
387			dev_err(dev, "timed out in resp(caller: %pS)\n",
388				(void *)_RET_IP_);
389			ret = -ETIMEDOUT;
390		}
391	}
392
393	if (!ret && xfer->hdr.status)
394		ret = scmi_to_linux_errno(xfer->hdr.status);
395
396	if (info->desc->ops->mark_txdone)
397		info->desc->ops->mark_txdone(cinfo, ret);
398
399	trace_scmi_xfer_end(xfer->transfer_id, xfer->hdr.id,
400			    xfer->hdr.protocol_id, xfer->hdr.seq, ret);
401
402	return ret;
403}
404
405#define SCMI_MAX_RESPONSE_TIMEOUT	(2 * MSEC_PER_SEC)
406
407/**
408 * scmi_do_xfer_with_response() - Do one transfer and wait until the delayed
409 *	response is received
410 *
411 * @handle: Pointer to SCMI entity handle
412 * @xfer: Transfer to initiate and wait for response
413 *
414 * Return: -ETIMEDOUT in case of no delayed response, if transmit error,
415 *	return corresponding error, else if all goes well, return 0.
416 */
417int scmi_do_xfer_with_response(const struct scmi_handle *handle,
418			       struct scmi_xfer *xfer)
419{
420	int ret, timeout = msecs_to_jiffies(SCMI_MAX_RESPONSE_TIMEOUT);
421	DECLARE_COMPLETION_ONSTACK(async_response);
422
423	xfer->async_done = &async_response;
424
425	ret = scmi_do_xfer(handle, xfer);
426	if (!ret && !wait_for_completion_timeout(xfer->async_done, timeout))
427		ret = -ETIMEDOUT;
428
429	xfer->async_done = NULL;
430	return ret;
431}
432
433/**
434 * scmi_xfer_get_init() - Allocate and initialise one message for transmit
435 *
436 * @handle: Pointer to SCMI entity handle
437 * @msg_id: Message identifier
438 * @prot_id: Protocol identifier for the message
439 * @tx_size: transmit message size
440 * @rx_size: receive message size
441 * @p: pointer to the allocated and initialised message
442 *
443 * This function allocates the message using @scmi_xfer_get and
444 * initialise the header.
445 *
446 * Return: 0 if all went fine with @p pointing to message, else
447 *	corresponding error.
448 */
449int scmi_xfer_get_init(const struct scmi_handle *handle, u8 msg_id, u8 prot_id,
450		       size_t tx_size, size_t rx_size, struct scmi_xfer **p)
451{
452	int ret;
453	struct scmi_xfer *xfer;
454	struct scmi_info *info = handle_to_scmi_info(handle);
455	struct scmi_xfers_info *minfo = &info->tx_minfo;
456	struct device *dev = info->dev;
457
458	/* Ensure we have sane transfer sizes */
459	if (rx_size > info->desc->max_msg_size ||
460	    tx_size > info->desc->max_msg_size)
461		return -ERANGE;
462
463	xfer = scmi_xfer_get(handle, minfo);
464	if (IS_ERR(xfer)) {
465		ret = PTR_ERR(xfer);
466		dev_err(dev, "failed to get free message slot(%d)\n", ret);
467		return ret;
468	}
469
470	xfer->tx.len = tx_size;
471	xfer->rx.len = rx_size ? : info->desc->max_msg_size;
472	xfer->hdr.id = msg_id;
473	xfer->hdr.protocol_id = prot_id;
474	xfer->hdr.poll_completion = false;
475
476	*p = xfer;
477
478	return 0;
479}
480
481/**
482 * scmi_version_get() - command to get the revision of the SCMI entity
483 *
484 * @handle: Pointer to SCMI entity handle
485 * @protocol: Protocol identifier for the message
486 * @version: Holds returned version of protocol.
487 *
488 * Updates the SCMI information in the internal data structure.
489 *
490 * Return: 0 if all went fine, else return appropriate error.
491 */
492int scmi_version_get(const struct scmi_handle *handle, u8 protocol,
493		     u32 *version)
494{
495	int ret;
496	__le32 *rev_info;
497	struct scmi_xfer *t;
498
499	ret = scmi_xfer_get_init(handle, PROTOCOL_VERSION, protocol, 0,
500				 sizeof(*version), &t);
501	if (ret)
502		return ret;
503
504	ret = scmi_do_xfer(handle, t);
505	if (!ret) {
506		rev_info = t->rx.buf;
507		*version = le32_to_cpu(*rev_info);
508	}
509
510	scmi_xfer_put(handle, t);
511	return ret;
512}
513
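The allocate/transfer/release sequence in scmi_version_get() above (scmi_xfer_get_init(), scmi_do_xfer(), then scmi_xfer_put()) is the pattern the protocol implementations built on top of this file follow. A minimal sketch under that assumption, using a hypothetical FOO_ATTRIBUTES command that returns a single 32-bit word (the command id and function name are illustrative only, not part of this driver):

static int scmi_foo_attributes_get(const struct scmi_handle *handle,
				   u32 *attributes)
{
	int ret;
	__le32 *attr;
	struct scmi_xfer *t;

	/* Hypothetical FOO_ATTRIBUTES command: no Tx payload, 4-byte reply */
	ret = scmi_xfer_get_init(handle, FOO_ATTRIBUTES, SCMI_PROTOCOL_BASE,
				 0, sizeof(*attributes), &t);
	if (ret)
		return ret;

	ret = scmi_do_xfer(handle, t);
	if (!ret) {
		attr = t->rx.buf;
		*attributes = le32_to_cpu(*attr);
	}

	scmi_xfer_put(handle, t);
	return ret;
}
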
514void scmi_setup_protocol_implemented(const struct scmi_handle *handle,
515				     u8 *prot_imp)
516{
517	struct scmi_info *info = handle_to_scmi_info(handle);
518
519	info->protocols_imp = prot_imp;
520}
521
522static bool
523scmi_is_protocol_implemented(const struct scmi_handle *handle, u8 prot_id)
524{
525	int i;
526	struct scmi_info *info = handle_to_scmi_info(handle);
527
528	if (!info->protocols_imp)
529		return false;
530
531	for (i = 0; i < MAX_PROTOCOLS_IMP; i++)
532		if (info->protocols_imp[i] == prot_id)
533			return true;
534	return false;
535}
536
537/**
538 * scmi_handle_get() - Get the SCMI handle for a device
539 *
540 * @dev: pointer to device for which we want SCMI handle
541 *
542 * NOTE: The function does not track individual clients of the framework
543 * and is expected to be maintained by caller of SCMI protocol library.
544 * scmi_handle_put must be balanced with successful scmi_handle_get
545 *
546 * Return: pointer to handle if successful, NULL on error
547 */
548struct scmi_handle *scmi_handle_get(struct device *dev)
549{
550	struct list_head *p;
551	struct scmi_info *info;
552	struct scmi_handle *handle = NULL;
553
554	mutex_lock(&scmi_list_mutex);
555	list_for_each(p, &scmi_list) {
556		info = list_entry(p, struct scmi_info, node);
557		if (dev->parent == info->dev) {
558			handle = &info->handle;
559			info->users++;
560			break;
561		}
562	}
563	mutex_unlock(&scmi_list_mutex);
564
565	return handle;
566}
567
568/**
569 * scmi_handle_put() - Release the handle acquired by scmi_handle_get
570 *
571 * @handle: handle acquired by scmi_handle_get
572 *
573 * NOTE: The function does not track individual clients of the framework
574 * and is expected to be maintained by caller of SCMI protocol library.
575 * scmi_handle_put must be balanced with successful scmi_handle_get
576 *
577 * Return: 0 if successfully released
578 *	if null was passed, it returns -EINVAL;
579 */
580int scmi_handle_put(const struct scmi_handle *handle)
581{
582	struct scmi_info *info;
583
584	if (!handle)
585		return -EINVAL;
586
587	info = handle_to_scmi_info(handle);
588	mutex_lock(&scmi_list_mutex);
589	if (!WARN_ON(!info->users))
590		info->users--;
591	mutex_unlock(&scmi_list_mutex);
592
593	return 0;
594}
595
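As the kernel-doc above stresses, every successful scmi_handle_get() must eventually be balanced by a scmi_handle_put(); the core does not track individual users. A rough sketch of that contract from a caller's perspective (scmi_dev here stands for a hypothetical child device of the SCMI instance):

	struct scmi_handle *handle;

	handle = scmi_handle_get(&scmi_dev->dev);
	if (!handle)
		return -EPROBE_DEFER;	/* instance not registered yet */

	/* ... use handle->version, handle->dev, per-protocol ops ... */

	scmi_handle_put(handle);	/* balances the earlier get */
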
596static int __scmi_xfer_info_init(struct scmi_info *sinfo,
597				 struct scmi_xfers_info *info)
598{
599	int i;
600	struct scmi_xfer *xfer;
601	struct device *dev = sinfo->dev;
602	const struct scmi_desc *desc = sinfo->desc;
603
604	/* Pre-allocated messages, no more than what hdr.seq can support */
605	if (WARN_ON(desc->max_msg >= MSG_TOKEN_MAX)) {
606		dev_err(dev, "Maximum message of %d exceeds supported %ld\n",
607			desc->max_msg, MSG_TOKEN_MAX);
608		return -EINVAL;
609	}
610
611	info->xfer_block = devm_kcalloc(dev, desc->max_msg,
612					sizeof(*info->xfer_block), GFP_KERNEL);
613	if (!info->xfer_block)
614		return -ENOMEM;
615
616	info->xfer_alloc_table = devm_kcalloc(dev, BITS_TO_LONGS(desc->max_msg),
617					      sizeof(long), GFP_KERNEL);
618	if (!info->xfer_alloc_table)
619		return -ENOMEM;
620
621	/* Pre-initialize the buffer pointer to pre-allocated buffers */
622	for (i = 0, xfer = info->xfer_block; i < desc->max_msg; i++, xfer++) {
623		xfer->rx.buf = devm_kcalloc(dev, sizeof(u8), desc->max_msg_size,
624					    GFP_KERNEL);
625		if (!xfer->rx.buf)
626			return -ENOMEM;
627
628		xfer->tx.buf = xfer->rx.buf;
629		init_completion(&xfer->done);
630	}
631
632	spin_lock_init(&info->xfer_lock);
633
634	return 0;
635}
636
637static int scmi_xfer_info_init(struct scmi_info *sinfo)
638{
639	int ret = __scmi_xfer_info_init(sinfo, &sinfo->tx_minfo);
640
641	if (!ret && idr_find(&sinfo->rx_idr, SCMI_PROTOCOL_BASE))
642		ret = __scmi_xfer_info_init(sinfo, &sinfo->rx_minfo);
643
644	return ret;
645}
646
647static int scmi_chan_setup(struct scmi_info *info, struct device *dev,
648			   int prot_id, bool tx)
649{
650	int ret, idx;
651	struct scmi_chan_info *cinfo;
652	struct idr *idr;
653
654	/* Transmit channel is first entry i.e. index 0 */
655	idx = tx ? 0 : 1;
656	idr = tx ? &info->tx_idr : &info->rx_idr;
657
658	/* check if already allocated, used for multiple devices per protocol */
659	cinfo = idr_find(idr, prot_id);
660	if (cinfo)
661		return 0;
662
663	if (!info->desc->ops->chan_available(dev, idx)) {
664		cinfo = idr_find(idr, SCMI_PROTOCOL_BASE);
665		if (unlikely(!cinfo)) /* Possible only if platform has no Rx */
666			return -EINVAL;
667		goto idr_alloc;
668	}
669
670	cinfo = devm_kzalloc(info->dev, sizeof(*cinfo), GFP_KERNEL);
671	if (!cinfo)
672		return -ENOMEM;
673
674	cinfo->dev = dev;
675
676	ret = info->desc->ops->chan_setup(cinfo, info->dev, tx);
677	if (ret)
678		return ret;
679
680idr_alloc:
681	ret = idr_alloc(idr, cinfo, prot_id, prot_id + 1, GFP_KERNEL);
682	if (ret != prot_id) {
683		dev_err(dev, "unable to allocate SCMI idr slot err %d\n", ret);
684		return ret;
685	}
686
687	cinfo->handle = &info->handle;
688	return 0;
689}
690
691static inline int
692scmi_txrx_setup(struct scmi_info *info, struct device *dev, int prot_id)
693{
694	int ret = scmi_chan_setup(info, dev, prot_id, true);
695
696	if (!ret) /* Rx is optional, hence no error check */
697		scmi_chan_setup(info, dev, prot_id, false);
698
699	return ret;
700}
701
702static inline void
703scmi_create_protocol_device(struct device_node *np, struct scmi_info *info,
704			    int prot_id, const char *name)
705{
706	struct scmi_device *sdev;
707
708	sdev = scmi_device_create(np, info->dev, prot_id, name);
709	if (!sdev) {
710		dev_err(info->dev, "failed to create %d protocol device\n",
711			prot_id);
712		return;
713	}
714
715	if (scmi_txrx_setup(info, &sdev->dev, prot_id)) {
716		dev_err(&sdev->dev, "failed to setup transport\n");
717		scmi_device_destroy(sdev);
718		return;
719	}
720
721	/* setup handle now as the transport is ready */
722	scmi_set_handle(sdev);
723}
724
725#define MAX_SCMI_DEV_PER_PROTOCOL	2
726struct scmi_prot_devnames {
727	int protocol_id;
728	char *names[MAX_SCMI_DEV_PER_PROTOCOL];
729};
730
731static struct scmi_prot_devnames devnames[] = {
732	{ SCMI_PROTOCOL_POWER,  { "genpd" },},
733	{ SCMI_PROTOCOL_PERF,   { "cpufreq" },},
734	{ SCMI_PROTOCOL_CLOCK,  { "clocks" },},
735	{ SCMI_PROTOCOL_SENSOR, { "hwmon" },},
736	{ SCMI_PROTOCOL_RESET,  { "reset" },},
737};
738
739static inline void
740scmi_create_protocol_devices(struct device_node *np, struct scmi_info *info,
741			     int prot_id)
742{
743	int loop, cnt;
744
745	for (loop = 0; loop < ARRAY_SIZE(devnames); loop++) {
746		if (devnames[loop].protocol_id != prot_id)
747			continue;
748
749		for (cnt = 0; cnt < ARRAY_SIZE(devnames[loop].names); cnt++) {
750			const char *name = devnames[loop].names[cnt];
751
752			if (name)
753				scmi_create_protocol_device(np, info, prot_id,
754							    name);
755		}
756	}
757}
758
759static int scmi_probe(struct platform_device *pdev)
760{
761	int ret;
762	struct scmi_handle *handle;
763	const struct scmi_desc *desc;
764	struct scmi_info *info;
765	struct device *dev = &pdev->dev;
766	struct device_node *child, *np = dev->of_node;
767
768	desc = of_device_get_match_data(dev);
769	if (!desc)
770		return -EINVAL;
771
772	info = devm_kzalloc(dev, sizeof(*info), GFP_KERNEL);
773	if (!info)
774		return -ENOMEM;
775
776	info->dev = dev;
777	info->desc = desc;
778	INIT_LIST_HEAD(&info->node);
779
780	platform_set_drvdata(pdev, info);
781	idr_init(&info->tx_idr);
782	idr_init(&info->rx_idr);
783
784	handle = &info->handle;
785	handle->dev = info->dev;
786	handle->version = &info->version;
787
788	ret = scmi_txrx_setup(info, dev, SCMI_PROTOCOL_BASE);
789	if (ret)
790		return ret;
791
792	ret = scmi_xfer_info_init(info);
793	if (ret)
794		return ret;
795
796	if (scmi_notification_init(handle))
797		dev_err(dev, "SCMI Notifications NOT available.\n");
798
799	ret = scmi_base_protocol_init(handle);
800	if (ret) {
801		dev_err(dev, "unable to communicate with SCMI(%d)\n", ret);
802		return ret;
803	}
804
805	mutex_lock(&scmi_list_mutex);
806	list_add_tail(&info->node, &scmi_list);
807	mutex_unlock(&scmi_list_mutex);
808
809	for_each_available_child_of_node(np, child) {
810		u32 prot_id;
811
812		if (of_property_read_u32(child, "reg", &prot_id))
813			continue;
814
815		if (!FIELD_FIT(MSG_PROTOCOL_ID_MASK, prot_id))
816			dev_err(dev, "Out of range protocol %d\n", prot_id);
817
818		if (!scmi_is_protocol_implemented(handle, prot_id)) {
819			dev_err(dev, "SCMI protocol %d not implemented\n",
820				prot_id);
821			continue;
822		}
823
824		scmi_create_protocol_devices(child, info, prot_id);
825	}
826
827	return 0;
828}
829
830void scmi_free_channel(struct scmi_chan_info *cinfo, struct idr *idr, int id)
831{
832	idr_remove(idr, id);
833}
834
835static int scmi_remove(struct platform_device *pdev)
836{
837	int ret = 0;
838	struct scmi_info *info = platform_get_drvdata(pdev);
839	struct idr *idr = &info->tx_idr;
840
841	scmi_notification_exit(&info->handle);
842
843	mutex_lock(&scmi_list_mutex);
844	if (info->users)
845		ret = -EBUSY;
846	else
847		list_del(&info->node);
848	mutex_unlock(&scmi_list_mutex);
849
850	if (ret)
851		return ret;
852
853	/* Safe to free channels since no more users */
854	ret = idr_for_each(idr, info->desc->ops->chan_free, idr);
855	idr_destroy(&info->tx_idr);
856
857	idr = &info->rx_idr;
858	ret = idr_for_each(idr, info->desc->ops->chan_free, idr);
859	idr_destroy(&info->rx_idr);
860
861	return ret;
862}
863
864static ssize_t protocol_version_show(struct device *dev,
865				     struct device_attribute *attr, char *buf)
866{
867	struct scmi_info *info = dev_get_drvdata(dev);
868
869	return sprintf(buf, "%u.%u\n", info->version.major_ver,
870		       info->version.minor_ver);
871}
872static DEVICE_ATTR_RO(protocol_version);
873
874static ssize_t firmware_version_show(struct device *dev,
875				     struct device_attribute *attr, char *buf)
876{
877	struct scmi_info *info = dev_get_drvdata(dev);
878
879	return sprintf(buf, "0x%x\n", info->version.impl_ver);
880}
881static DEVICE_ATTR_RO(firmware_version);
882
883static ssize_t vendor_id_show(struct device *dev,
884			      struct device_attribute *attr, char *buf)
885{
886	struct scmi_info *info = dev_get_drvdata(dev);
887
888	return sprintf(buf, "%s\n", info->version.vendor_id);
889}
890static DEVICE_ATTR_RO(vendor_id);
891
892static ssize_t sub_vendor_id_show(struct device *dev,
893				  struct device_attribute *attr, char *buf)
894{
895	struct scmi_info *info = dev_get_drvdata(dev);
896
897	return sprintf(buf, "%s\n", info->version.sub_vendor_id);
898}
899static DEVICE_ATTR_RO(sub_vendor_id);
900
901static struct attribute *versions_attrs[] = {
902	&dev_attr_firmware_version.attr,
903	&dev_attr_protocol_version.attr,
904	&dev_attr_vendor_id.attr,
905	&dev_attr_sub_vendor_id.attr,
906	NULL,
907};
908ATTRIBUTE_GROUPS(versions);
909
910/* Each compatible listed below must have descriptor associated with it */
911static const struct of_device_id scmi_of_match[] = {
912	{ .compatible = "arm,scmi", .data = &scmi_mailbox_desc },
913#ifdef CONFIG_HAVE_ARM_SMCCC_DISCOVERY
914	{ .compatible = "arm,scmi-smc", .data = &scmi_smc_desc},
915#endif
916	{ /* Sentinel */ },
917};
918
919MODULE_DEVICE_TABLE(of, scmi_of_match);
920
921static struct platform_driver scmi_driver = {
922	.driver = {
923		   .name = "arm-scmi",
924		   .of_match_table = scmi_of_match,
925		   .dev_groups = versions_groups,
926		   },
927	.probe = scmi_probe,
928	.remove = scmi_remove,
929};
930
931module_platform_driver(scmi_driver);
932
933MODULE_ALIAS("platform: arm-scmi");
934MODULE_AUTHOR("Sudeep Holla <sudeep.holla@arm.com>");
935MODULE_DESCRIPTION("ARM SCMI protocol driver");
936MODULE_LICENSE("GPL v2");
v4.17
  1// SPDX-License-Identifier: GPL-2.0
  2/*
  3 * System Control and Management Interface (SCMI) Message Protocol driver
  4 *
  5 * SCMI Message Protocol is used between the System Control Processor(SCP)
  6 * and the Application Processors(AP). The Message Handling Unit(MHU)
  7 * provides a mechanism for inter-processor communication between SCP's
  8 * Cortex M3 and AP.
  9 *
 10 * SCP offers control and management of the core/cluster power states,
 11 * various power domain DVFS including the core/cluster, certain system
 12 * clocks configuration, thermal sensors and many others.
 13 *
 14 * Copyright (C) 2018 ARM Ltd.
 15 */
 16
 17#include <linux/bitmap.h>
 18#include <linux/export.h>
 19#include <linux/io.h>
 20#include <linux/kernel.h>
 21#include <linux/ktime.h>
 22#include <linux/mailbox_client.h>
 23#include <linux/module.h>
 24#include <linux/of_address.h>
 25#include <linux/of_device.h>
 26#include <linux/processor.h>
 27#include <linux/semaphore.h>
 28#include <linux/slab.h>
 29
 30#include "common.h"
 31
 32#define MSG_ID_SHIFT		0
 33#define MSG_ID_MASK		0xff
 34#define MSG_TYPE_SHIFT		8
 35#define MSG_TYPE_MASK		0x3
 36#define MSG_PROTOCOL_ID_SHIFT	10
 37#define MSG_PROTOCOL_ID_MASK	0xff
 38#define MSG_TOKEN_ID_SHIFT	18
 39#define MSG_TOKEN_ID_MASK	0x3ff
 40#define MSG_XTRACT_TOKEN(header)	\
 41	(((header) >> MSG_TOKEN_ID_SHIFT) & MSG_TOKEN_ID_MASK)
 42
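The macros above carve the 32-bit message header into a message id in bits [7:0], a message type in bits [9:8], a protocol id in bits [17:10] and a sequence token in bits [27:18]. A worked example, not taken from the driver, for a base protocol (id 0x10) command with message id 0x6, type 0 and token 0x3:

	u32 hdr = (0x6  << MSG_ID_SHIFT) |
		  (0x10 << MSG_PROTOCOL_ID_SHIFT) |
		  (0x3  << MSG_TOKEN_ID_SHIFT);		/* == 0x000c4006 */

	u16 token = MSG_XTRACT_TOKEN(hdr);		/* recovers 0x3 on receive */
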
 43enum scmi_error_codes {
 44	SCMI_SUCCESS = 0,	/* Success */
 45	SCMI_ERR_SUPPORT = -1,	/* Not supported */
 46	SCMI_ERR_PARAMS = -2,	/* Invalid Parameters */
 47	SCMI_ERR_ACCESS = -3,	/* Invalid access/permission denied */
 48	SCMI_ERR_ENTRY = -4,	/* Not found */
 49	SCMI_ERR_RANGE = -5,	/* Value out of range */
 50	SCMI_ERR_BUSY = -6,	/* Device busy */
 51	SCMI_ERR_COMMS = -7,	/* Communication Error */
 52	SCMI_ERR_GENERIC = -8,	/* Generic Error */
 53	SCMI_ERR_HARDWARE = -9,	/* Hardware Error */
 54	SCMI_ERR_PROTOCOL = -10,/* Protocol Error */
 55	SCMI_ERR_MAX
 56};
 57
 58/* List of all  SCMI devices active in system */
 59static LIST_HEAD(scmi_list);
 60/* Protection for the entire list */
 61static DEFINE_MUTEX(scmi_list_mutex);
 62
 63/**
 64 * struct scmi_xfers_info - Structure to manage transfer information
 65 *
 66 * @xfer_block: Preallocated Message array
 67 * @xfer_alloc_table: Bitmap table for allocated messages.
 68 *	Index of this bitmap table is also used for message
 69 *	sequence identifier.
 70 * @xfer_lock: Protection for message allocation
 71 */
 72struct scmi_xfers_info {
 73	struct scmi_xfer *xfer_block;
 74	unsigned long *xfer_alloc_table;
 75	/* protect transfer allocation */
 76	spinlock_t xfer_lock;
 77};
 78
 79/**
 80 * struct scmi_desc - Description of SoC integration
 81 *
 82 * @max_rx_timeout_ms: Timeout for communication with SoC (in Milliseconds)
 83 * @max_msg: Maximum number of messages that can be pending
 84 *	simultaneously in the system
 85 * @max_msg_size: Maximum size of data per message that can be handled.
 86 */
 87struct scmi_desc {
 88	int max_rx_timeout_ms;
 89	int max_msg;
 90	int max_msg_size;
 91};
 92
 93/**
 94 * struct scmi_chan_info - Structure representing SCMI channel information
 95 *
 96 * @cl: Mailbox Client
 97 * @chan: Transmit/Receive mailbox channel
 98 * @payload: Transmit/Receive mailbox channel payload area
 99 * @dev: Reference to device in the SCMI hierarchy corresponding to this
100 *	 channel
101 */
102struct scmi_chan_info {
103	struct mbox_client cl;
104	struct mbox_chan *chan;
105	void __iomem *payload;
106	struct device *dev;
107	struct scmi_handle *handle;
108};
109
110/**
111 * struct scmi_info - Structure representing a  SCMI instance
112 *
113 * @dev: Device pointer
114 * @desc: SoC description for this instance
115 * @handle: Instance of SCMI handle to send to clients
116 * @version: SCMI revision information containing protocol version,
117 *	implementation version and (sub-)vendor identification.
118 * @minfo: Message info
119 * @tx_idr: IDR object to map protocol id to channel info pointer
120 * @protocols_imp: list of protocols implemented, currently maximum of
121 *	MAX_PROTOCOLS_IMP elements allocated by the base protocol
122 * @node: list head
123 * @users: Number of users of this instance
124 */
125struct scmi_info {
126	struct device *dev;
127	const struct scmi_desc *desc;
128	struct scmi_revision_info version;
129	struct scmi_handle handle;
130	struct scmi_xfers_info minfo;
131	struct idr tx_idr;
132	u8 *protocols_imp;
133	struct list_head node;
134	int users;
135};
136
137#define client_to_scmi_chan_info(c) container_of(c, struct scmi_chan_info, cl)
138#define handle_to_scmi_info(h)	container_of(h, struct scmi_info, handle)
139
140/*
141 * SCMI specification requires all parameters, message headers, return
142 * arguments or any protocol data to be expressed in little endian
143 * format only.
144 */
145struct scmi_shared_mem {
146	__le32 reserved;
147	__le32 channel_status;
148#define SCMI_SHMEM_CHAN_STAT_CHANNEL_ERROR	BIT(1)
149#define SCMI_SHMEM_CHAN_STAT_CHANNEL_FREE	BIT(0)
150	__le32 reserved1[2];
151	__le32 flags;
152#define SCMI_SHMEM_FLAG_INTR_ENABLED	BIT(0)
153	__le32 length;
154	__le32 msg_header;
155	u8 msg_payload[0];
156};
157
158static const int scmi_linux_errmap[] = {
159	/* better than switch case as long as return value is continuous */
160	0,			/* SCMI_SUCCESS */
161	-EOPNOTSUPP,		/* SCMI_ERR_SUPPORT */
162	-EINVAL,		/* SCMI_ERR_PARAM */
163	-EACCES,		/* SCMI_ERR_ACCESS */
164	-ENOENT,		/* SCMI_ERR_ENTRY */
165	-ERANGE,		/* SCMI_ERR_RANGE */
166	-EBUSY,			/* SCMI_ERR_BUSY */
167	-ECOMM,			/* SCMI_ERR_COMMS */
168	-EIO,			/* SCMI_ERR_GENERIC */
169	-EREMOTEIO,		/* SCMI_ERR_HARDWARE */
170	-EPROTO,		/* SCMI_ERR_PROTOCOL */
171};
172
173static inline int scmi_to_linux_errno(int errno)
174{
175	if (errno < SCMI_SUCCESS && errno > SCMI_ERR_MAX)
176		return scmi_linux_errmap[-errno];
177	return -EIO;
178}
179
180/**
181 * scmi_dump_header_dbg() - Helper to dump a message header.
182 *
183 * @dev: Device pointer corresponding to the SCMI entity
184 * @hdr: pointer to header.
185 */
186static inline void scmi_dump_header_dbg(struct device *dev,
187					struct scmi_msg_hdr *hdr)
188{
189	dev_dbg(dev, "Command ID: %x Sequence ID: %x Protocol: %x\n",
190		hdr->id, hdr->seq, hdr->protocol_id);
191}
192
193static void scmi_fetch_response(struct scmi_xfer *xfer,
194				struct scmi_shared_mem __iomem *mem)
195{
196	xfer->hdr.status = ioread32(mem->msg_payload);
197	/* Skip the length of header and status in payload area i.e. 8 bytes */
198	xfer->rx.len = min_t(size_t, xfer->rx.len, ioread32(&mem->length) - 8);
199
200	/* Take a copy to the rx buffer.. */
201	memcpy_fromio(xfer->rx.buf, mem->msg_payload + 4, xfer->rx.len);
202}
203
204/**
205 * scmi_rx_callback() - mailbox client callback for receive messages
206 *
207 * @cl: client pointer
208 * @m: mailbox message
209 *
210 * Processes one received message to appropriate transfer information and
211 * signals completion of the transfer.
212 *
213 * NOTE: This function will be invoked in IRQ context, hence should be
214 * as optimal as possible.
215 */
216static void scmi_rx_callback(struct mbox_client *cl, void *m)
217{
218	u16 xfer_id;
219	struct scmi_xfer *xfer;
220	struct scmi_chan_info *cinfo = client_to_scmi_chan_info(cl);
221	struct device *dev = cinfo->dev;
222	struct scmi_info *info = handle_to_scmi_info(cinfo->handle);
223	struct scmi_xfers_info *minfo = &info->minfo;
224	struct scmi_shared_mem __iomem *mem = cinfo->payload;
225
226	xfer_id = MSG_XTRACT_TOKEN(ioread32(&mem->msg_header));
227
228	/*
229	 * Are we even expecting this?
230	 */
231	if (!test_bit(xfer_id, minfo->xfer_alloc_table)) {
232		dev_err(dev, "message for %d is not expected!\n", xfer_id);
233		return;
234	}
235
236	xfer = &minfo->xfer_block[xfer_id];
237
238	scmi_dump_header_dbg(dev, &xfer->hdr);
239	/* Is the message of valid length? */
240	if (xfer->rx.len > info->desc->max_msg_size) {
241		dev_err(dev, "unable to handle %zu xfer(max %d)\n",
242			xfer->rx.len, info->desc->max_msg_size);
243		return;
244	}
245
246	scmi_fetch_response(xfer, mem);
247	complete(&xfer->done);
248}
249
250/**
251 * pack_scmi_header() - packs and returns 32-bit header
252 *
253 * @hdr: pointer to header containing all the information on message id,
254 *	protocol id and sequence id.
255 */
256static inline u32 pack_scmi_header(struct scmi_msg_hdr *hdr)
257{
258	return ((hdr->id & MSG_ID_MASK) << MSG_ID_SHIFT) |
259	   ((hdr->seq & MSG_TOKEN_ID_MASK) << MSG_TOKEN_ID_SHIFT) |
260	   ((hdr->protocol_id & MSG_PROTOCOL_ID_MASK) << MSG_PROTOCOL_ID_SHIFT);
261}
262
263/**
264 * scmi_tx_prepare() - mailbox client callback to prepare for the transfer
265 *
266 * @cl: client pointer
267 * @m: mailbox message
268 *
269 * This function prepares the shared memory which contains the header and the
270 * payload.
271 */
272static void scmi_tx_prepare(struct mbox_client *cl, void *m)
273{
274	struct scmi_xfer *t = m;
275	struct scmi_chan_info *cinfo = client_to_scmi_chan_info(cl);
276	struct scmi_shared_mem __iomem *mem = cinfo->payload;
277
278	/* Mark channel busy + clear error */
279	iowrite32(0x0, &mem->channel_status);
280	iowrite32(t->hdr.poll_completion ? 0 : SCMI_SHMEM_FLAG_INTR_ENABLED,
281		  &mem->flags);
282	iowrite32(sizeof(mem->msg_header) + t->tx.len, &mem->length);
283	iowrite32(pack_scmi_header(&t->hdr), &mem->msg_header);
284	if (t->tx.buf)
285		memcpy_toio(mem->msg_payload, t->tx.buf, t->tx.len);
286}
287
288/**
289 * scmi_one_xfer_get() - Allocate one message
290 *
291 * @handle: SCMI entity handle
292 *
293 * Helper function which is used by various command functions that are
294 * exposed to clients of this driver for allocating a message traffic event.
295 *
296 * This function can sleep depending on pending requests already in the system
297 * for the SCMI entity. Further, this also holds a spinlock to maintain
298 * integrity of internal data structures.
299 *
300 * Return: pointer to the allocated xfer on success, else an ERR_PTR() encoded error.
301 */
302static struct scmi_xfer *scmi_one_xfer_get(const struct scmi_handle *handle)
303{
304	u16 xfer_id;
305	struct scmi_xfer *xfer;
306	unsigned long flags, bit_pos;
307	struct scmi_info *info = handle_to_scmi_info(handle);
308	struct scmi_xfers_info *minfo = &info->minfo;
309
310	/* Keep the locked section as small as possible */
311	spin_lock_irqsave(&minfo->xfer_lock, flags);
312	bit_pos = find_first_zero_bit(minfo->xfer_alloc_table,
313				      info->desc->max_msg);
314	if (bit_pos == info->desc->max_msg) {
315		spin_unlock_irqrestore(&minfo->xfer_lock, flags);
316		return ERR_PTR(-ENOMEM);
317	}
318	set_bit(bit_pos, minfo->xfer_alloc_table);
319	spin_unlock_irqrestore(&minfo->xfer_lock, flags);
320
321	xfer_id = bit_pos;
322
323	xfer = &minfo->xfer_block[xfer_id];
324	xfer->hdr.seq = xfer_id;
325	reinit_completion(&xfer->done);
326
327	return xfer;
328}
329
330/**
331 * scmi_one_xfer_put() - Release a message
332 *
333 * @minfo: transfer info pointer
334 * @xfer: message that was reserved by scmi_one_xfer_get
335 *
336 * This holds a spinlock to maintain integrity of internal data structures.
337 */
338void scmi_one_xfer_put(const struct scmi_handle *handle, struct scmi_xfer *xfer)
339{
340	unsigned long flags;
341	struct scmi_info *info = handle_to_scmi_info(handle);
342	struct scmi_xfers_info *minfo = &info->minfo;
343
344	/*
345	 * Keep the locked section as small as possible
346	 * NOTE: we might escape with smp_mb and no lock here..
347	 * but just be conservative and symmetric.
348	 */
349	spin_lock_irqsave(&minfo->xfer_lock, flags);
350	clear_bit(xfer->hdr.seq, minfo->xfer_alloc_table);
351	spin_unlock_irqrestore(&minfo->xfer_lock, flags);
352}
353
354static bool
355scmi_xfer_poll_done(const struct scmi_chan_info *cinfo, struct scmi_xfer *xfer)
356{
357	struct scmi_shared_mem __iomem *mem = cinfo->payload;
358	u16 xfer_id = MSG_XTRACT_TOKEN(ioread32(&mem->msg_header));
359
360	if (xfer->hdr.seq != xfer_id)
361		return false;
362
363	return ioread32(&mem->channel_status) &
364		(SCMI_SHMEM_CHAN_STAT_CHANNEL_ERROR |
365		SCMI_SHMEM_CHAN_STAT_CHANNEL_FREE);
366}
367
368#define SCMI_MAX_POLL_TO_NS	(100 * NSEC_PER_USEC)
369
370static bool scmi_xfer_done_no_timeout(const struct scmi_chan_info *cinfo,
371				      struct scmi_xfer *xfer, ktime_t stop)
372{
373	ktime_t __cur = ktime_get();
374
375	return scmi_xfer_poll_done(cinfo, xfer) || ktime_after(__cur, stop);
376}
377
378/**
379 * scmi_do_xfer() - Do one transfer
380 *
381 * @info: Pointer to SCMI entity information
382 * @xfer: Transfer to initiate and wait for response
383 *
384 * Return: -ETIMEDOUT in case of no response, if transmit error,
385 *   return corresponding error, else if all goes well,
386 *   return 0.
387 */
388int scmi_do_xfer(const struct scmi_handle *handle, struct scmi_xfer *xfer)
389{
390	int ret;
391	int timeout;
392	struct scmi_info *info = handle_to_scmi_info(handle);
393	struct device *dev = info->dev;
394	struct scmi_chan_info *cinfo;
395
396	cinfo = idr_find(&info->tx_idr, xfer->hdr.protocol_id);
397	if (unlikely(!cinfo))
398		return -EINVAL;
399
400	ret = mbox_send_message(cinfo->chan, xfer);
401	if (ret < 0) {
402		dev_dbg(dev, "mbox send fail %d\n", ret);
403		return ret;
404	}
405
406	/* mbox_send_message returns non-negative value on success, so reset */
407	ret = 0;
408
409	if (xfer->hdr.poll_completion) {
410		ktime_t stop = ktime_add_ns(ktime_get(), SCMI_MAX_POLL_TO_NS);
411
412		spin_until_cond(scmi_xfer_done_no_timeout(cinfo, xfer, stop));
413
414		if (ktime_before(ktime_get(), stop))
415			scmi_fetch_response(xfer, cinfo->payload);
416		else
417			ret = -ETIMEDOUT;
418	} else {
419		/* And we wait for the response. */
420		timeout = msecs_to_jiffies(info->desc->max_rx_timeout_ms);
421		if (!wait_for_completion_timeout(&xfer->done, timeout)) {
422			dev_err(dev, "mbox timed out in resp(caller: %pS)\n",
423				(void *)_RET_IP_);
424			ret = -ETIMEDOUT;
425		}
426	}
427
428	if (!ret && xfer->hdr.status)
429		ret = scmi_to_linux_errno(xfer->hdr.status);
430
431	/*
432	 * NOTE: we might prefer not to need the mailbox ticker to manage the
433	 * transfer queueing since the protocol layer queues things by itself.
434	 * Unfortunately, we have to kick the mailbox framework after we have
435	 * received our message.
436	 */
437	mbox_client_txdone(cinfo->chan, ret);
438
439	return ret;
440}
441
442/**
443 * scmi_one_xfer_init() - Allocate and initialise one message
444 *
445 * @handle: SCMI entity handle
446 * @msg_id: Message identifier
447 * @msg_prot_id: Protocol identifier for the message
448 * @tx_size: transmit message size
449 * @rx_size: receive message size
450 * @p: pointer to the allocated and initialised message
451 *
452 * This function allocates the message using @scmi_one_xfer_get and
453 * initialise the header.
454 *
455 * Return: 0 if all went fine with @p pointing to message, else
456 *	corresponding error.
457 */
458int scmi_one_xfer_init(const struct scmi_handle *handle, u8 msg_id, u8 prot_id,
459		       size_t tx_size, size_t rx_size, struct scmi_xfer **p)
460{
461	int ret;
462	struct scmi_xfer *xfer;
463	struct scmi_info *info = handle_to_scmi_info(handle);
464	struct device *dev = info->dev;
465
466	/* Ensure we have sane transfer sizes */
467	if (rx_size > info->desc->max_msg_size ||
468	    tx_size > info->desc->max_msg_size)
469		return -ERANGE;
470
471	xfer = scmi_one_xfer_get(handle);
472	if (IS_ERR(xfer)) {
473		ret = PTR_ERR(xfer);
474		dev_err(dev, "failed to get free message slot(%d)\n", ret);
475		return ret;
476	}
477
478	xfer->tx.len = tx_size;
479	xfer->rx.len = rx_size ? : info->desc->max_msg_size;
480	xfer->hdr.id = msg_id;
481	xfer->hdr.protocol_id = prot_id;
482	xfer->hdr.poll_completion = false;
483
484	*p = xfer;
485	return 0;
486}
487
488/**
489 * scmi_version_get() - command to get the revision of the SCMI entity
490 *
491 * @handle: Handle to SCMI entity information
492 *
493 * Updates the SCMI information in the internal data structure.
494 *
495 * Return: 0 if all went fine, else return appropriate error.
496 */
497int scmi_version_get(const struct scmi_handle *handle, u8 protocol,
498		     u32 *version)
499{
500	int ret;
501	__le32 *rev_info;
502	struct scmi_xfer *t;
503
504	ret = scmi_one_xfer_init(handle, PROTOCOL_VERSION, protocol, 0,
505				 sizeof(*version), &t);
506	if (ret)
507		return ret;
508
509	ret = scmi_do_xfer(handle, t);
510	if (!ret) {
511		rev_info = t->rx.buf;
512		*version = le32_to_cpu(*rev_info);
513	}
514
515	scmi_one_xfer_put(handle, t);
516	return ret;
517}
518
519void scmi_setup_protocol_implemented(const struct scmi_handle *handle,
520				     u8 *prot_imp)
521{
522	struct scmi_info *info = handle_to_scmi_info(handle);
523
524	info->protocols_imp = prot_imp;
525}
526
527static bool
528scmi_is_protocol_implemented(const struct scmi_handle *handle, u8 prot_id)
529{
530	int i;
531	struct scmi_info *info = handle_to_scmi_info(handle);
532
533	if (!info->protocols_imp)
534		return false;
535
536	for (i = 0; i < MAX_PROTOCOLS_IMP; i++)
537		if (info->protocols_imp[i] == prot_id)
538			return true;
539	return false;
540}
541
542/**
543 * scmi_handle_get() - Get the  SCMI handle for a device
544 *
545 * @dev: pointer to device for which we want SCMI handle
546 *
547 * NOTE: The function does not track individual clients of the framework
548 * and is expected to be maintained by caller of  SCMI protocol library.
549 * scmi_handle_put must be balanced with successful scmi_handle_get
550 *
551 * Return: pointer to handle if successful, NULL on error
552 */
553struct scmi_handle *scmi_handle_get(struct device *dev)
554{
555	struct list_head *p;
556	struct scmi_info *info;
557	struct scmi_handle *handle = NULL;
558
559	mutex_lock(&scmi_list_mutex);
560	list_for_each(p, &scmi_list) {
561		info = list_entry(p, struct scmi_info, node);
562		if (dev->parent == info->dev) {
563			handle = &info->handle;
564			info->users++;
565			break;
566		}
567	}
568	mutex_unlock(&scmi_list_mutex);
569
570	return handle;
571}
572
573/**
574 * scmi_handle_put() - Release the handle acquired by scmi_handle_get
575 *
576 * @handle: handle acquired by scmi_handle_get
577 *
578 * NOTE: The function does not track individual clients of the framework
579 * and is expected to be maintained by caller of  SCMI protocol library.
580 * scmi_handle_put must be balanced with successful scmi_handle_get
581 *
582 * Return: 0 if successfully released
583 *	if null was passed, it returns -EINVAL;
584 */
585int scmi_handle_put(const struct scmi_handle *handle)
586{
587	struct scmi_info *info;
588
589	if (!handle)
590		return -EINVAL;
591
592	info = handle_to_scmi_info(handle);
593	mutex_lock(&scmi_list_mutex);
594	if (!WARN_ON(!info->users))
595		info->users--;
596	mutex_unlock(&scmi_list_mutex);
597
598	return 0;
599}
600
601static const struct scmi_desc scmi_generic_desc = {
602	.max_rx_timeout_ms = 30,	/* we may increase this if required */
603	.max_msg = 20,		/* Limited by MBOX_TX_QUEUE_LEN */
604	.max_msg_size = 128,
605};
606
607/* Each compatible listed below must have descriptor associated with it */
608static const struct of_device_id scmi_of_match[] = {
609	{ .compatible = "arm,scmi", .data = &scmi_generic_desc },
610	{ /* Sentinel */ },
611};
612
613MODULE_DEVICE_TABLE(of, scmi_of_match);
614
615static int scmi_xfer_info_init(struct scmi_info *sinfo)
616{
617	int i;
618	struct scmi_xfer *xfer;
619	struct device *dev = sinfo->dev;
620	const struct scmi_desc *desc = sinfo->desc;
621	struct scmi_xfers_info *info = &sinfo->minfo;
622
623	/* Pre-allocated messages, no more than what hdr.seq can support */
624	if (WARN_ON(desc->max_msg >= (MSG_TOKEN_ID_MASK + 1))) {
625		dev_err(dev, "Maximum message of %d exceeds supported %d\n",
626			desc->max_msg, MSG_TOKEN_ID_MASK + 1);
627		return -EINVAL;
628	}
629
630	info->xfer_block = devm_kcalloc(dev, desc->max_msg,
631					sizeof(*info->xfer_block), GFP_KERNEL);
632	if (!info->xfer_block)
633		return -ENOMEM;
634
635	info->xfer_alloc_table = devm_kcalloc(dev, BITS_TO_LONGS(desc->max_msg),
636					      sizeof(long), GFP_KERNEL);
637	if (!info->xfer_alloc_table)
638		return -ENOMEM;
639
640	bitmap_zero(info->xfer_alloc_table, desc->max_msg);
641
642	/* Pre-initialize the buffer pointer to pre-allocated buffers */
643	for (i = 0, xfer = info->xfer_block; i < desc->max_msg; i++, xfer++) {
644		xfer->rx.buf = devm_kcalloc(dev, sizeof(u8), desc->max_msg_size,
645					    GFP_KERNEL);
646		if (!xfer->rx.buf)
647			return -ENOMEM;
648
649		xfer->tx.buf = xfer->rx.buf;
650		init_completion(&xfer->done);
651	}
652
653	spin_lock_init(&info->xfer_lock);
654
655	return 0;
656}
657
658static int scmi_mailbox_check(struct device_node *np)
659{
660	struct of_phandle_args arg;
661
662	return of_parse_phandle_with_args(np, "mboxes", "#mbox-cells", 0, &arg);
663}
664
665static int scmi_mbox_free_channel(int id, void *p, void *data)
666{
667	struct scmi_chan_info *cinfo = p;
668	struct idr *idr = data;
669
670	if (!IS_ERR_OR_NULL(cinfo->chan)) {
671		mbox_free_channel(cinfo->chan);
672		cinfo->chan = NULL;
673	}
674
675	idr_remove(idr, id);
676
677	return 0;
678}
679
680static int scmi_remove(struct platform_device *pdev)
681{
682	int ret = 0;
683	struct scmi_info *info = platform_get_drvdata(pdev);
684	struct idr *idr = &info->tx_idr;
685
686	mutex_lock(&scmi_list_mutex);
687	if (info->users)
688		ret = -EBUSY;
689	else
690		list_del(&info->node);
691	mutex_unlock(&scmi_list_mutex);
692
693	if (!ret) {
694		/* Safe to free channels since no more users */
695		ret = idr_for_each(idr, scmi_mbox_free_channel, idr);
696		idr_destroy(&info->tx_idr);
697	}
698
699	return ret;
700}
701
702static inline int
703scmi_mbox_chan_setup(struct scmi_info *info, struct device *dev, int prot_id)
704{
705	int ret;
706	struct resource res;
707	resource_size_t size;
708	struct device_node *shmem, *np = dev->of_node;
709	struct scmi_chan_info *cinfo;
710	struct mbox_client *cl;
711
712	if (scmi_mailbox_check(np)) {
713		cinfo = idr_find(&info->tx_idr, SCMI_PROTOCOL_BASE);
714		goto idr_alloc;
715	}
716
717	cinfo = devm_kzalloc(info->dev, sizeof(*cinfo), GFP_KERNEL);
718	if (!cinfo)
719		return -ENOMEM;
720
721	cinfo->dev = dev;
722
723	cl = &cinfo->cl;
724	cl->dev = dev;
725	cl->rx_callback = scmi_rx_callback;
726	cl->tx_prepare = scmi_tx_prepare;
727	cl->tx_block = false;
728	cl->knows_txdone = true;
729
730	shmem = of_parse_phandle(np, "shmem", 0);
731	ret = of_address_to_resource(shmem, 0, &res);
732	of_node_put(shmem);
733	if (ret) {
734		dev_err(dev, "failed to get SCMI Tx payload mem resource\n");
735		return ret;
736	}
737
738	size = resource_size(&res);
739	cinfo->payload = devm_ioremap(info->dev, res.start, size);
740	if (!cinfo->payload) {
741		dev_err(dev, "failed to ioremap SCMI Tx payload\n");
742		return -EADDRNOTAVAIL;
743	}
744
745	/* Transmit channel is first entry i.e. index 0 */
746	cinfo->chan = mbox_request_channel(cl, 0);
747	if (IS_ERR(cinfo->chan)) {
748		ret = PTR_ERR(cinfo->chan);
749		if (ret != -EPROBE_DEFER)
750			dev_err(dev, "failed to request SCMI Tx mailbox\n");
751		return ret;
752	}
753
754idr_alloc:
755	ret = idr_alloc(&info->tx_idr, cinfo, prot_id, prot_id + 1, GFP_KERNEL);
756	if (ret != prot_id) {
757		dev_err(dev, "unable to allocate SCMI idr slot err %d\n", ret);
758		return ret;
759	}
760
761	cinfo->handle = &info->handle;
762	return 0;
763}
764
765static inline void
766scmi_create_protocol_device(struct device_node *np, struct scmi_info *info,
767			    int prot_id)
768{
769	struct scmi_device *sdev;
770
771	sdev = scmi_device_create(np, info->dev, prot_id);
772	if (!sdev) {
773		dev_err(info->dev, "failed to create %d protocol device\n",
774			prot_id);
775		return;
776	}
777
778	if (scmi_mbox_chan_setup(info, &sdev->dev, prot_id)) {
779		dev_err(&sdev->dev, "failed to setup transport\n");
780		scmi_device_destroy(sdev);
781		return;
782	}
783
784	/* setup handle now as the transport is ready */
785	scmi_set_handle(sdev);
786}
787
788static int scmi_probe(struct platform_device *pdev)
789{
790	int ret;
791	struct scmi_handle *handle;
792	const struct scmi_desc *desc;
793	struct scmi_info *info;
794	struct device *dev = &pdev->dev;
795	struct device_node *child, *np = dev->of_node;
796
797	/* Only mailbox method supported, check for the presence of one */
798	if (scmi_mailbox_check(np)) {
799		dev_err(dev, "no mailbox found in %pOF\n", np);
800		return -EINVAL;
801	}
802
803	desc = of_match_device(scmi_of_match, dev)->data;
804
805	info = devm_kzalloc(dev, sizeof(*info), GFP_KERNEL);
806	if (!info)
807		return -ENOMEM;
808
809	info->dev = dev;
810	info->desc = desc;
811	INIT_LIST_HEAD(&info->node);
812
813	ret = scmi_xfer_info_init(info);
814	if (ret)
815		return ret;
816
817	platform_set_drvdata(pdev, info);
818	idr_init(&info->tx_idr);
819
820	handle = &info->handle;
821	handle->dev = info->dev;
822	handle->version = &info->version;
823
824	ret = scmi_mbox_chan_setup(info, dev, SCMI_PROTOCOL_BASE);
825	if (ret)
826		return ret;
827
828	ret = scmi_base_protocol_init(handle);
829	if (ret) {
830		dev_err(dev, "unable to communicate with SCMI(%d)\n", ret);
831		return ret;
832	}
833
834	mutex_lock(&scmi_list_mutex);
835	list_add_tail(&info->node, &scmi_list);
836	mutex_unlock(&scmi_list_mutex);
837
838	for_each_available_child_of_node(np, child) {
839		u32 prot_id;
840
841		if (of_property_read_u32(child, "reg", &prot_id))
842			continue;
843
844		prot_id &= MSG_PROTOCOL_ID_MASK;
845
846		if (!scmi_is_protocol_implemented(handle, prot_id)) {
847			dev_err(dev, "SCMI protocol %d not implemented\n",
848				prot_id);
849			continue;
850		}
851
852		scmi_create_protocol_device(child, info, prot_id);
853	}
854
855	return 0;
856}
857
857
858static struct platform_driver scmi_driver = {
859	.driver = {
860		   .name = "arm-scmi",
861		   .of_match_table = scmi_of_match,
862		   },
863	.probe = scmi_probe,
864	.remove = scmi_remove,
865};
866
867module_platform_driver(scmi_driver);
868
869MODULE_ALIAS("platform: arm-scmi");
870MODULE_AUTHOR("Sudeep Holla <sudeep.holla@arm.com>");
871MODULE_DESCRIPTION("ARM SCMI protocol driver");
872MODULE_LICENSE("GPL v2");