// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2016, NVIDIA CORPORATION.  All rights reserved.
 */

#include <linux/clk/tegra.h>
#include <linux/genalloc.h>
#include <linux/mailbox_client.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/pm.h>
#include <linux/semaphore.h>
#include <linux/sched/clock.h>

#include <soc/tegra/bpmp.h>
#include <soc/tegra/bpmp-abi.h>
#include <soc/tegra/ivc.h>

#include "bpmp-private.h"

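/*
 * Per-message flags exchanged with the BPMP firmware: MSG_ACK asks the peer
 * to post a response, MSG_RING asks it to ring the doorbell once that
 * response has been posted. TAG_SZ is the fixed size of the firmware tag
 * buffer used by the MRQ_QUERY_TAG / MRQ_QUERY_FW_TAG queries.
 */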
#define MSG_ACK		BIT(0)
#define MSG_RING	BIT(1)
#define TAG_SZ		32

static inline struct tegra_bpmp *
mbox_client_to_bpmp(struct mbox_client *client)
{
	return container_of(client, struct tegra_bpmp, mbox.client);
}

static inline const struct tegra_bpmp_ops *
channel_to_ops(struct tegra_bpmp_channel *channel)
{
	struct tegra_bpmp *bpmp = channel->bpmp;

	return bpmp->soc->ops;
}

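/**
 * tegra_bpmp_get() - look up the BPMP instance referenced by a device
 * @dev: device whose device tree node carries an "nvidia,bpmp" phandle
 *
 * Resolves the phandle to the BPMP platform device and returns its driver
 * data with a reference held on the underlying device. Callers must balance
 * this with tegra_bpmp_put(). Returns an ERR_PTR() on failure, including
 * -EPROBE_DEFER when the BPMP driver has not finished probing yet.
 */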
struct tegra_bpmp *tegra_bpmp_get(struct device *dev)
{
	struct platform_device *pdev;
	struct tegra_bpmp *bpmp;
	struct device_node *np;

	np = of_parse_phandle(dev->of_node, "nvidia,bpmp", 0);
	if (!np)
		return ERR_PTR(-ENOENT);

	pdev = of_find_device_by_node(np);
	if (!pdev) {
		bpmp = ERR_PTR(-ENODEV);
		goto put;
	}

	bpmp = platform_get_drvdata(pdev);
	if (!bpmp) {
		bpmp = ERR_PTR(-EPROBE_DEFER);
		put_device(&pdev->dev);
		goto put;
	}

put:
	of_node_put(np);
	return bpmp;
}
EXPORT_SYMBOL_GPL(tegra_bpmp_get);

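/**
 * tegra_bpmp_put() - release a reference obtained from tegra_bpmp_get()
 * @bpmp: BPMP instance to release, may be NULL
 */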
void tegra_bpmp_put(struct tegra_bpmp *bpmp)
{
	if (bpmp)
		put_device(bpmp->dev);
}
EXPORT_SYMBOL_GPL(tegra_bpmp_put);

static int
tegra_bpmp_channel_get_thread_index(struct tegra_bpmp_channel *channel)
{
	struct tegra_bpmp *bpmp = channel->bpmp;
	unsigned int count;
	int index;

	count = bpmp->soc->channels.thread.count;

	index = channel - channel->bpmp->threaded_channels;
	if (index < 0 || index >= count)
		return -EINVAL;

	return index;
}

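/*
 * A message is valid if both payloads fit into a channel's data area and a
 * non-zero size in either direction is accompanied by a buffer pointer.
 */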
static bool tegra_bpmp_message_valid(const struct tegra_bpmp_message *msg)
{
	return (msg->tx.size <= MSG_DATA_MIN_SZ) &&
	       (msg->rx.size <= MSG_DATA_MIN_SZ) &&
	       (msg->tx.size == 0 || msg->tx.data) &&
	       (msg->rx.size == 0 || msg->rx.data);
}

static bool tegra_bpmp_is_response_ready(struct tegra_bpmp_channel *channel)
{
	const struct tegra_bpmp_ops *ops = channel_to_ops(channel);

	return ops->is_response_ready(channel);
}

static bool tegra_bpmp_is_request_ready(struct tegra_bpmp_channel *channel)
{
	const struct tegra_bpmp_ops *ops = channel_to_ops(channel);

	return ops->is_request_ready(channel);
}

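/*
 * Busy-wait for the firmware's response on an atomic channel, bounded by the
 * cpu_tx timeout from the SoC data. Used on paths that cannot sleep.
 */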
static int tegra_bpmp_wait_response(struct tegra_bpmp_channel *channel)
{
	unsigned long timeout = channel->bpmp->soc->channels.cpu_tx.timeout;
	ktime_t end;

	end = ktime_add_us(ktime_get(), timeout);

	do {
		if (tegra_bpmp_is_response_ready(channel))
			return 0;
	} while (ktime_before(ktime_get(), end));

	return -ETIMEDOUT;
}

static int tegra_bpmp_ack_response(struct tegra_bpmp_channel *channel)
{
	const struct tegra_bpmp_ops *ops = channel_to_ops(channel);

	return ops->ack_response(channel);
}

static int tegra_bpmp_ack_request(struct tegra_bpmp_channel *channel)
{
	const struct tegra_bpmp_ops *ops = channel_to_ops(channel);

	return ops->ack_request(channel);
}

static bool
tegra_bpmp_is_request_channel_free(struct tegra_bpmp_channel *channel)
{
	const struct tegra_bpmp_ops *ops = channel_to_ops(channel);

	return ops->is_request_channel_free(channel);
}

static bool
tegra_bpmp_is_response_channel_free(struct tegra_bpmp_channel *channel)
{
	const struct tegra_bpmp_ops *ops = channel_to_ops(channel);

	return ops->is_response_channel_free(channel);
}

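/*
 * Busy-wait, bounded by the cpu_tx timeout, until the firmware has consumed
 * any previous request on this channel. The scheduler clock (local_clock())
 * serves as the time base here.
 */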
static int
tegra_bpmp_wait_request_channel_free(struct tegra_bpmp_channel *channel)
{
	unsigned long timeout = channel->bpmp->soc->channels.cpu_tx.timeout;
	ktime_t start, now;

	start = ns_to_ktime(local_clock());

	do {
		if (tegra_bpmp_is_request_channel_free(channel))
			return 0;

		now = ns_to_ktime(local_clock());
	} while (ktime_us_delta(now, start) < timeout);

	return -ETIMEDOUT;
}

static int tegra_bpmp_post_request(struct tegra_bpmp_channel *channel)
{
	const struct tegra_bpmp_ops *ops = channel_to_ops(channel);

	return ops->post_request(channel);
}

static int tegra_bpmp_post_response(struct tegra_bpmp_channel *channel)
{
	const struct tegra_bpmp_ops *ops = channel_to_ops(channel);

	return ops->post_response(channel);
}

static int tegra_bpmp_ring_doorbell(struct tegra_bpmp *bpmp)
{
	return bpmp->soc->ops->ring_doorbell(bpmp);
}

static ssize_t __tegra_bpmp_channel_read(struct tegra_bpmp_channel *channel,
					 void *data, size_t size, int *ret)
{
	int err;

	if (data && size > 0)
		memcpy(data, channel->ib->data, size);

	err = tegra_bpmp_ack_response(channel);
	if (err < 0)
		return err;

	*ret = channel->ib->code;

	return 0;
}

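/*
 * Complete a threaded transaction: copy the response out, release the
 * channel's slot in the allocation bitmap and give back the semaphore that
 * limits the number of outstanding threaded requests.
 */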
static ssize_t tegra_bpmp_channel_read(struct tegra_bpmp_channel *channel,
				       void *data, size_t size, int *ret)
{
	struct tegra_bpmp *bpmp = channel->bpmp;
	unsigned long flags;
	ssize_t err;
	int index;

	index = tegra_bpmp_channel_get_thread_index(channel);
	if (index < 0) {
		err = index;
		goto unlock;
	}

	spin_lock_irqsave(&bpmp->lock, flags);
	err = __tegra_bpmp_channel_read(channel, data, size, ret);
	clear_bit(index, bpmp->threaded.allocated);
	spin_unlock_irqrestore(&bpmp->lock, flags);

unlock:
	up(&bpmp->threaded.lock);

	return err;
}

static ssize_t __tegra_bpmp_channel_write(struct tegra_bpmp_channel *channel,
					  unsigned int mrq, unsigned long flags,
					  const void *data, size_t size)
{
	channel->ob->code = mrq;
	channel->ob->flags = flags;

	if (data && size > 0)
		memcpy(channel->ob->data, data, size);

	return tegra_bpmp_post_request(channel);
}

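/*
 * Claim a free threaded channel and post a request on it. The semaphore
 * bounds the number of concurrent threaded transactions; the allocation and
 * busy bitmaps track which channels are in flight. On success the channel is
 * returned with its "allocated" and "busy" bits set; the doorbell still has
 * to be rung by the caller.
 */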
static struct tegra_bpmp_channel *
tegra_bpmp_write_threaded(struct tegra_bpmp *bpmp, unsigned int mrq,
			  const void *data, size_t size)
{
	unsigned long timeout = bpmp->soc->channels.thread.timeout;
	unsigned int count = bpmp->soc->channels.thread.count;
	struct tegra_bpmp_channel *channel;
	unsigned long flags;
	unsigned int index;
	int err;

	err = down_timeout(&bpmp->threaded.lock, usecs_to_jiffies(timeout));
	if (err < 0)
		return ERR_PTR(err);

	spin_lock_irqsave(&bpmp->lock, flags);

	index = find_first_zero_bit(bpmp->threaded.allocated, count);
	if (index == count) {
		err = -EBUSY;
		goto unlock;
	}

	channel = &bpmp->threaded_channels[index];

	if (!tegra_bpmp_is_request_channel_free(channel)) {
		err = -EBUSY;
		goto unlock;
	}

	set_bit(index, bpmp->threaded.allocated);

	err = __tegra_bpmp_channel_write(channel, mrq, MSG_ACK | MSG_RING,
					 data, size);
	if (err < 0)
		goto clear_allocated;

	set_bit(index, bpmp->threaded.busy);

	spin_unlock_irqrestore(&bpmp->lock, flags);
	return channel;

clear_allocated:
	clear_bit(index, bpmp->threaded.allocated);
unlock:
	spin_unlock_irqrestore(&bpmp->lock, flags);
	up(&bpmp->threaded.lock);

	return ERR_PTR(err);
}

static ssize_t tegra_bpmp_channel_write(struct tegra_bpmp_channel *channel,
					unsigned int mrq, unsigned long flags,
					const void *data, size_t size)
{
	int err;

	err = tegra_bpmp_wait_request_channel_free(channel);
	if (err < 0)
		return err;

	return __tegra_bpmp_channel_write(channel, mrq, flags, data, size);
}

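/**
 * tegra_bpmp_transfer_atomic() - send a message to the BPMP without sleeping
 * @bpmp: BPMP instance to talk to
 * @msg: message to send; msg->rx.ret receives the firmware return code
 *
 * Posts the request on the dedicated atomic TX channel, rings the doorbell
 * and busy-waits for the response. Must be called with interrupts disabled.
 * Returns 0 on success or a negative error code.
 */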
int tegra_bpmp_transfer_atomic(struct tegra_bpmp *bpmp,
			       struct tegra_bpmp_message *msg)
{
	struct tegra_bpmp_channel *channel;
	int err;

	if (WARN_ON(!irqs_disabled()))
		return -EPERM;

	if (!tegra_bpmp_message_valid(msg))
		return -EINVAL;

	channel = bpmp->tx_channel;

	spin_lock(&bpmp->atomic_tx_lock);

	err = tegra_bpmp_channel_write(channel, msg->mrq, MSG_ACK,
				       msg->tx.data, msg->tx.size);
	if (err < 0) {
		spin_unlock(&bpmp->atomic_tx_lock);
		return err;
	}

	spin_unlock(&bpmp->atomic_tx_lock);

	err = tegra_bpmp_ring_doorbell(bpmp);
	if (err < 0)
		return err;

	err = tegra_bpmp_wait_response(channel);
	if (err < 0)
		return err;

	return __tegra_bpmp_channel_read(channel, msg->rx.data, msg->rx.size,
					 &msg->rx.ret);
}
EXPORT_SYMBOL_GPL(tegra_bpmp_transfer_atomic);

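/**
 * tegra_bpmp_transfer() - send a message to the BPMP and sleep for the reply
 * @bpmp: BPMP instance to talk to
 * @msg: message to send; msg->rx.ret receives the firmware return code
 *
 * Claims a threaded channel, rings the doorbell and waits (up to the thread
 * channel timeout) for the RX interrupt path to complete the transaction.
 * Must be called from a context that may sleep.
 *
 * A minimal usage sketch (MRQ_EXAMPLE, struct my_req and struct my_resp are
 * illustrative placeholders, not definitions from this driver):
 *
 *	struct my_req req = { ... };
 *	struct my_resp resp;
 *	struct tegra_bpmp_message msg = {
 *		.mrq = MRQ_EXAMPLE,
 *		.tx = { .data = &req, .size = sizeof(req) },
 *		.rx = { .data = &resp, .size = sizeof(resp) },
 *	};
 *	int err = tegra_bpmp_transfer(bpmp, &msg);
 *	if (err < 0 || msg.rx.ret < 0)
 *		// handle failure
 */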
int tegra_bpmp_transfer(struct tegra_bpmp *bpmp,
			struct tegra_bpmp_message *msg)
{
	struct tegra_bpmp_channel *channel;
	unsigned long timeout;
	int err;

	if (WARN_ON(irqs_disabled()))
		return -EPERM;

	if (!tegra_bpmp_message_valid(msg))
		return -EINVAL;

	channel = tegra_bpmp_write_threaded(bpmp, msg->mrq, msg->tx.data,
					    msg->tx.size);
	if (IS_ERR(channel))
		return PTR_ERR(channel);

	err = tegra_bpmp_ring_doorbell(bpmp);
	if (err < 0)
		return err;

	timeout = usecs_to_jiffies(bpmp->soc->channels.thread.timeout);

	err = wait_for_completion_timeout(&channel->completion, timeout);
	if (err == 0)
		return -ETIMEDOUT;

	return tegra_bpmp_channel_read(channel, msg->rx.data, msg->rx.size,
				       &msg->rx.ret);
}
EXPORT_SYMBOL_GPL(tegra_bpmp_transfer);

static struct tegra_bpmp_mrq *tegra_bpmp_find_mrq(struct tegra_bpmp *bpmp,
						  unsigned int mrq)
{
	struct tegra_bpmp_mrq *entry;

	list_for_each_entry(entry, &bpmp->mrqs, list)
		if (entry->mrq == mrq)
			return entry;

	return NULL;
}

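/**
 * tegra_bpmp_mrq_return() - reply to a request that originated from the BPMP
 * @channel: channel on which the request was received
 * @code: return code to send back to the firmware
 * @data: optional response payload
 * @size: size of the response payload, at most MSG_DATA_MIN_SZ bytes
 *
 * Acknowledges the inbound request and, if the sender asked for it (MSG_ACK),
 * posts a response and optionally rings the doorbell (MSG_RING). Called by
 * MRQ handlers registered with tegra_bpmp_request_mrq().
 */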
void tegra_bpmp_mrq_return(struct tegra_bpmp_channel *channel, int code,
			   const void *data, size_t size)
{
	unsigned long flags = channel->ib->flags;
	struct tegra_bpmp *bpmp = channel->bpmp;
	int err;

	if (WARN_ON(size > MSG_DATA_MIN_SZ))
		return;

	err = tegra_bpmp_ack_request(channel);
	if (WARN_ON(err < 0))
		return;

	if ((flags & MSG_ACK) == 0)
		return;

	if (WARN_ON(!tegra_bpmp_is_response_channel_free(channel)))
		return;

	channel->ob->code = code;

	if (data && size > 0)
		memcpy(channel->ob->data, data, size);

	err = tegra_bpmp_post_response(channel);
	if (WARN_ON(err < 0))
		return;

	if (flags & MSG_RING) {
		err = tegra_bpmp_ring_doorbell(bpmp);
		if (WARN_ON(err < 0))
			return;
	}
}
EXPORT_SYMBOL_GPL(tegra_bpmp_mrq_return);

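/*
 * Dispatch an inbound request from the firmware to the handler registered
 * for its MRQ; unknown MRQs are answered with -EINVAL.
 */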
static void tegra_bpmp_handle_mrq(struct tegra_bpmp *bpmp,
				  unsigned int mrq,
				  struct tegra_bpmp_channel *channel)
{
	struct tegra_bpmp_mrq *entry;
	u32 zero = 0;

	spin_lock(&bpmp->lock);

	entry = tegra_bpmp_find_mrq(bpmp, mrq);
	if (!entry) {
		spin_unlock(&bpmp->lock);
		tegra_bpmp_mrq_return(channel, -EINVAL, &zero, sizeof(zero));
		return;
	}

	entry->handler(mrq, channel, entry->data);

	spin_unlock(&bpmp->lock);
}

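/**
 * tegra_bpmp_request_mrq() - register a handler for a firmware-initiated MRQ
 * @bpmp: BPMP instance
 * @mrq: MRQ number to handle
 * @handler: callback invoked when the firmware sends this MRQ
 * @data: opaque pointer passed back to @handler
 *
 * The handler runs with bpmp->lock held from the RX path and is expected to
 * finish the exchange with tegra_bpmp_mrq_return(). Returns 0 on success or
 * a negative error code.
 */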
int tegra_bpmp_request_mrq(struct tegra_bpmp *bpmp, unsigned int mrq,
			   tegra_bpmp_mrq_handler_t handler, void *data)
{
	struct tegra_bpmp_mrq *entry;
	unsigned long flags;

	if (!handler)
		return -EINVAL;

	entry = devm_kzalloc(bpmp->dev, sizeof(*entry), GFP_KERNEL);
	if (!entry)
		return -ENOMEM;

	spin_lock_irqsave(&bpmp->lock, flags);

	entry->mrq = mrq;
	entry->handler = handler;
	entry->data = data;
	list_add(&entry->list, &bpmp->mrqs);

	spin_unlock_irqrestore(&bpmp->lock, flags);

	return 0;
}
EXPORT_SYMBOL_GPL(tegra_bpmp_request_mrq);

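/**
 * tegra_bpmp_free_mrq() - unregister a previously registered MRQ handler
 * @bpmp: BPMP instance
 * @mrq: MRQ number whose handler should be removed
 * @data: opaque pointer that was passed to tegra_bpmp_request_mrq()
 */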
void tegra_bpmp_free_mrq(struct tegra_bpmp *bpmp, unsigned int mrq, void *data)
{
	struct tegra_bpmp_mrq *entry;
	unsigned long flags;

	spin_lock_irqsave(&bpmp->lock, flags);

	entry = tegra_bpmp_find_mrq(bpmp, mrq);
	if (!entry)
		goto unlock;

	list_del(&entry->list);
	devm_kfree(bpmp->dev, entry);

unlock:
	spin_unlock_irqrestore(&bpmp->lock, flags);
}
EXPORT_SYMBOL_GPL(tegra_bpmp_free_mrq);

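/**
 * tegra_bpmp_mrq_is_supported() - check whether the firmware implements a MRQ
 * @bpmp: BPMP instance
 * @mrq: MRQ number to query
 *
 * Uses MRQ_QUERY_ABI to ask the firmware whether @mrq is implemented.
 * Returns true if it is, false otherwise (including on transfer errors).
 */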
bool tegra_bpmp_mrq_is_supported(struct tegra_bpmp *bpmp, unsigned int mrq)
{
	struct mrq_query_abi_request req = { .mrq = cpu_to_le32(mrq) };
	struct mrq_query_abi_response resp;
	struct tegra_bpmp_message msg = {
		.mrq = MRQ_QUERY_ABI,
		.tx = {
			.data = &req,
			.size = sizeof(req),
		},
		.rx = {
			.data = &resp,
			.size = sizeof(resp),
		},
	};
	int err;

	err = tegra_bpmp_transfer(bpmp, &msg);
	if (err || msg.rx.ret)
		return false;

	return resp.status == 0;
}
EXPORT_SYMBOL_GPL(tegra_bpmp_mrq_is_supported);

static void tegra_bpmp_mrq_handle_ping(unsigned int mrq,
				       struct tegra_bpmp_channel *channel,
				       void *data)
{
	struct mrq_ping_request *request;
	struct mrq_ping_response response;

	request = (struct mrq_ping_request *)channel->ib->data;

	memset(&response, 0, sizeof(response));
	response.reply = request->challenge << 1;

	tegra_bpmp_mrq_return(channel, 0, &response, sizeof(response));
}

static int tegra_bpmp_ping(struct tegra_bpmp *bpmp)
{
	struct mrq_ping_response response;
	struct mrq_ping_request request;
	struct tegra_bpmp_message msg;
	unsigned long flags;
	ktime_t start, end;
	int err;

	memset(&request, 0, sizeof(request));
	request.challenge = 1;

	memset(&response, 0, sizeof(response));

	memset(&msg, 0, sizeof(msg));
	msg.mrq = MRQ_PING;
	msg.tx.data = &request;
	msg.tx.size = sizeof(request);
	msg.rx.data = &response;
	msg.rx.size = sizeof(response);

	local_irq_save(flags);
	start = ktime_get();
	err = tegra_bpmp_transfer_atomic(bpmp, &msg);
	end = ktime_get();
	local_irq_restore(flags);

	if (!err)
		dev_dbg(bpmp->dev,
			"ping ok: challenge: %u, response: %u, time: %lld\n",
			request.challenge, response.reply,
			ktime_to_us(ktime_sub(end, start)));

	return err;
}

/* deprecated version of tag query */
static int tegra_bpmp_get_firmware_tag_old(struct tegra_bpmp *bpmp, char *tag,
					   size_t size)
{
	struct mrq_query_tag_request request;
	struct tegra_bpmp_message msg;
	unsigned long flags;
	dma_addr_t phys;
	void *virt;
	int err;

	if (size != TAG_SZ)
		return -EINVAL;

	virt = dma_alloc_coherent(bpmp->dev, TAG_SZ, &phys,
				  GFP_KERNEL | GFP_DMA32);
	if (!virt)
		return -ENOMEM;

	memset(&request, 0, sizeof(request));
	request.addr = phys;

	memset(&msg, 0, sizeof(msg));
	msg.mrq = MRQ_QUERY_TAG;
	msg.tx.data = &request;
	msg.tx.size = sizeof(request);

	local_irq_save(flags);
	err = tegra_bpmp_transfer_atomic(bpmp, &msg);
	local_irq_restore(flags);

	if (err == 0)
		memcpy(tag, virt, TAG_SZ);

	dma_free_coherent(bpmp->dev, TAG_SZ, virt, phys);

	return err;
}

static int tegra_bpmp_get_firmware_tag(struct tegra_bpmp *bpmp, char *tag,
				       size_t size)
{
	if (tegra_bpmp_mrq_is_supported(bpmp, MRQ_QUERY_FW_TAG)) {
		struct mrq_query_fw_tag_response resp;
		struct tegra_bpmp_message msg = {
			.mrq = MRQ_QUERY_FW_TAG,
			.rx = {
				.data = &resp,
				.size = sizeof(resp),
			},
		};
		int err;

		if (size != sizeof(resp.tag))
			return -EINVAL;

		err = tegra_bpmp_transfer(bpmp, &msg);

		if (err)
			return err;
		if (msg.rx.ret < 0)
			return -EINVAL;

		memcpy(tag, resp.tag, sizeof(resp.tag));
		return 0;
	}

	return tegra_bpmp_get_firmware_tag_old(bpmp, tag, size);
}

static void tegra_bpmp_channel_signal(struct tegra_bpmp_channel *channel)
{
	unsigned long flags = channel->ob->flags;

	if ((flags & MSG_RING) == 0)
		return;

	complete(&channel->completion);
}

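/**
 * tegra_bpmp_handle_rx() - process doorbell notifications from the firmware
 * @bpmp: BPMP instance
 *
 * Called from the SoC-specific interrupt path. Dispatches any inbound request
 * on the RX channel and completes every busy threaded channel whose response
 * has arrived, so that waiters in tegra_bpmp_transfer() can proceed.
 */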
void tegra_bpmp_handle_rx(struct tegra_bpmp *bpmp)
{
	struct tegra_bpmp_channel *channel;
	unsigned int i, count;
	unsigned long *busy;

	channel = bpmp->rx_channel;
	count = bpmp->soc->channels.thread.count;
	busy = bpmp->threaded.busy;

	if (tegra_bpmp_is_request_ready(channel))
		tegra_bpmp_handle_mrq(bpmp, channel->ib->code, channel);

	spin_lock(&bpmp->lock);

	for_each_set_bit(i, busy, count) {
		struct tegra_bpmp_channel *channel;

		channel = &bpmp->threaded_channels[i];

		if (tegra_bpmp_is_response_ready(channel)) {
			tegra_bpmp_channel_signal(channel);
			clear_bit(i, busy);
		}
	}

	spin_unlock(&bpmp->lock);
}

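/*
 * Probe: allocate channel state, hand off to the SoC-specific init, verify
 * the firmware with MRQ_PING, log its tag and then populate child devices
 * and the optional clock, reset, power-domain and debugfs interfaces.
 */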
static int tegra_bpmp_probe(struct platform_device *pdev)
{
	struct tegra_bpmp *bpmp;
	char tag[TAG_SZ];
	size_t size;
	int err;

	bpmp = devm_kzalloc(&pdev->dev, sizeof(*bpmp), GFP_KERNEL);
	if (!bpmp)
		return -ENOMEM;

	bpmp->soc = of_device_get_match_data(&pdev->dev);
	bpmp->dev = &pdev->dev;

	INIT_LIST_HEAD(&bpmp->mrqs);
	spin_lock_init(&bpmp->lock);

	bpmp->threaded.count = bpmp->soc->channels.thread.count;
	sema_init(&bpmp->threaded.lock, bpmp->threaded.count);

	size = BITS_TO_LONGS(bpmp->threaded.count) * sizeof(long);

	bpmp->threaded.allocated = devm_kzalloc(&pdev->dev, size, GFP_KERNEL);
	if (!bpmp->threaded.allocated)
		return -ENOMEM;

	bpmp->threaded.busy = devm_kzalloc(&pdev->dev, size, GFP_KERNEL);
	if (!bpmp->threaded.busy)
		return -ENOMEM;

	spin_lock_init(&bpmp->atomic_tx_lock);
	bpmp->tx_channel = devm_kzalloc(&pdev->dev, sizeof(*bpmp->tx_channel),
					GFP_KERNEL);
	if (!bpmp->tx_channel)
		return -ENOMEM;

	bpmp->rx_channel = devm_kzalloc(&pdev->dev, sizeof(*bpmp->rx_channel),
					GFP_KERNEL);
	if (!bpmp->rx_channel)
		return -ENOMEM;

	bpmp->threaded_channels = devm_kcalloc(&pdev->dev, bpmp->threaded.count,
					       sizeof(*bpmp->threaded_channels),
					       GFP_KERNEL);
	if (!bpmp->threaded_channels)
		return -ENOMEM;

	err = bpmp->soc->ops->init(bpmp);
	if (err < 0)
		return err;

	err = tegra_bpmp_request_mrq(bpmp, MRQ_PING,
				     tegra_bpmp_mrq_handle_ping, bpmp);
	if (err < 0)
		goto deinit;

	err = tegra_bpmp_ping(bpmp);
	if (err < 0) {
		dev_err(&pdev->dev, "failed to ping BPMP: %d\n", err);
		goto free_mrq;
	}

	err = tegra_bpmp_get_firmware_tag(bpmp, tag, sizeof(tag));
	if (err < 0) {
		dev_err(&pdev->dev, "failed to get firmware tag: %d\n", err);
		goto free_mrq;
	}

	dev_info(&pdev->dev, "firmware: %.*s\n", (int)sizeof(tag), tag);

	platform_set_drvdata(pdev, bpmp);

	err = of_platform_default_populate(pdev->dev.of_node, NULL, &pdev->dev);
	if (err < 0)
		goto free_mrq;

	if (of_find_property(pdev->dev.of_node, "#clock-cells", NULL)) {
		err = tegra_bpmp_init_clocks(bpmp);
		if (err < 0)
			goto free_mrq;
	}

	if (of_find_property(pdev->dev.of_node, "#reset-cells", NULL)) {
		err = tegra_bpmp_init_resets(bpmp);
		if (err < 0)
			goto free_mrq;
	}

	if (of_find_property(pdev->dev.of_node, "#power-domain-cells", NULL)) {
		err = tegra_bpmp_init_powergates(bpmp);
		if (err < 0)
			goto free_mrq;
	}

	err = tegra_bpmp_init_debugfs(bpmp);
	if (err < 0)
		dev_err(&pdev->dev, "debugfs initialization failed: %d\n", err);

	return 0;

free_mrq:
	tegra_bpmp_free_mrq(bpmp, MRQ_PING, bpmp);
deinit:
	if (bpmp->soc->ops->deinit)
		bpmp->soc->ops->deinit(bpmp);

	return err;
}

static int __maybe_unused tegra_bpmp_resume(struct device *dev)
{
	struct tegra_bpmp *bpmp = dev_get_drvdata(dev);

	if (bpmp->soc->ops->resume)
		return bpmp->soc->ops->resume(bpmp);
	else
		return 0;
}

static const struct dev_pm_ops tegra_bpmp_pm_ops = {
	.resume_noirq = tegra_bpmp_resume,
};

#if IS_ENABLED(CONFIG_ARCH_TEGRA_186_SOC) || \
    IS_ENABLED(CONFIG_ARCH_TEGRA_194_SOC)
static const struct tegra_bpmp_soc tegra186_soc = {
	.channels = {
		.cpu_tx = {
			.offset = 3,
			.timeout = 60 * USEC_PER_SEC,
		},
		.thread = {
			.offset = 0,
			.count = 3,
			.timeout = 600 * USEC_PER_SEC,
		},
		.cpu_rx = {
			.offset = 13,
			.timeout = 0,
		},
	},
	.ops = &tegra186_bpmp_ops,
	.num_resets = 193,
};
#endif

#if IS_ENABLED(CONFIG_ARCH_TEGRA_210_SOC)
static const struct tegra_bpmp_soc tegra210_soc = {
	.channels = {
		.cpu_tx = {
			.offset = 0,
			.count = 1,
			.timeout = 60 * USEC_PER_SEC,
		},
		.thread = {
			.offset = 4,
			.count = 1,
			.timeout = 600 * USEC_PER_SEC,
		},
		.cpu_rx = {
			.offset = 8,
			.count = 1,
			.timeout = 0,
		},
	},
	.ops = &tegra210_bpmp_ops,
};
#endif

static const struct of_device_id tegra_bpmp_match[] = {
#if IS_ENABLED(CONFIG_ARCH_TEGRA_186_SOC) || \
    IS_ENABLED(CONFIG_ARCH_TEGRA_194_SOC)
	{ .compatible = "nvidia,tegra186-bpmp", .data = &tegra186_soc },
#endif
#if IS_ENABLED(CONFIG_ARCH_TEGRA_210_SOC)
	{ .compatible = "nvidia,tegra210-bpmp", .data = &tegra210_soc },
#endif
	{ }
};

static struct platform_driver tegra_bpmp_driver = {
	.driver = {
		.name = "tegra-bpmp",
		.of_match_table = tegra_bpmp_match,
		.pm = &tegra_bpmp_pm_ops,
		.suppress_bind_attrs = true,
	},
	.probe = tegra_bpmp_probe,
};
builtin_platform_driver(tegra_bpmp_driver);