v6.9.4
  1// SPDX-License-Identifier: GPL-2.0-or-later
  2/*
  3 * PowerNV OPAL asynchronous completion interfaces
  4 *
  5 * Copyright 2013-2017 IBM Corp.
 
 
 
 
 
  6 */
  7
  8#undef DEBUG
  9
 10#include <linux/kernel.h>
 11#include <linux/init.h>
 12#include <linux/slab.h>
 13#include <linux/sched.h>
 14#include <linux/semaphore.h>
 15#include <linux/spinlock.h>
 16#include <linux/wait.h>
 17#include <linux/gfp.h>
 18#include <linux/of.h>
 19#include <asm/machdep.h>
 20#include <asm/opal.h>
 21
/*
 * Lifecycle of an async token:
 *   UNALLOCATED -> ALLOCATED   (__opal_async_get_token)
 *   ALLOCATED   -> DISPATCHED  (first interruptible wait)
 *               -> COMPLETED   (opal_async_comp_event)
 *               -> UNALLOCATED (__opal_async_release_token)
 *   DISPATCHED  -> ABANDONED   (released before OPAL responded)
 * ABANDONED tokens are freed by the completion handler when the
 * response finally arrives.
 */
enum opal_async_token_state {
	ASYNC_TOKEN_UNALLOCATED = 0,	/* free for allocation */
	ASYNC_TOKEN_ALLOCATED,		/* handed out to a caller */
	ASYNC_TOKEN_DISPATCHED,		/* an interruptible wait has begun */
	ASYNC_TOKEN_ABANDONED,		/* released while OPAL still owes a reply */
	ASYNC_TOKEN_COMPLETED		/* OPAL's completion message arrived */
};
 29
/* Per-token state plus a buffer for OPAL's completion message. */
struct opal_async_token {
	enum opal_async_token_state state;	/* protected by opal_async_comp_lock */
	struct opal_msg response;	/* filled in by opal_async_comp_event() */
};
 34
 
 
/* Waiters in opal_async_wait_response*() sleep here; woken on completion */
static DECLARE_WAIT_QUEUE_HEAD(opal_async_wait);
/* Protects the state field of every token */
static DEFINE_SPINLOCK(opal_async_comp_lock);
/* Counts free tokens; allocators block on it when all are in use */
static struct semaphore opal_async_sem;
/* Token table; sized from the "opal-msg-async-num" device-tree property */
static unsigned int opal_max_async_tokens;
static struct opal_async_token *opal_async_tokens;
 40
 41static int __opal_async_get_token(void)
 42{
 43	unsigned long flags;
 44	int i, token = -EBUSY;
 45
 46	spin_lock_irqsave(&opal_async_comp_lock, flags);
 
 
 
 
 
 47
 48	for (i = 0; i < opal_max_async_tokens; i++) {
 49		if (opal_async_tokens[i].state == ASYNC_TOKEN_UNALLOCATED) {
 50			opal_async_tokens[i].state = ASYNC_TOKEN_ALLOCATED;
 51			token = i;
 52			break;
 53		}
 54	}
 55
 
 
 
 56	spin_unlock_irqrestore(&opal_async_comp_lock, flags);
 57	return token;
 58}
 59
 60/*
 61 * Note: If the returned token is used in an opal call and opal returns
 62 * OPAL_ASYNC_COMPLETION you MUST call one of opal_async_wait_response() or
 63 * opal_async_wait_response_interruptible() at least once before calling another
 64 * opal_async_* function
 65 */
 66int opal_async_get_token_interruptible(void)
 67{
 68	int token;
 69
 70	/* Wait until a token is available */
 71	if (down_interruptible(&opal_async_sem))
 72		return -ERESTARTSYS;
 73
 74	token = __opal_async_get_token();
 75	if (token < 0)
 76		up(&opal_async_sem);
 77
 78	return token;
 79}
 80EXPORT_SYMBOL_GPL(opal_async_get_token_interruptible);
 81
/*
 * Move a token back towards UNALLOCATED, honouring the state machine.
 *
 * Returns 0 when the token really was freed (caller may then bump the
 * semaphore), 1 when OPAL still owes a response and the token could only
 * be marked ABANDONED, or -EINVAL for an out-of-range token.
 */
static int __opal_async_release_token(int token)
{
	unsigned long flags;
	int rc;

	if (token < 0 || token >= opal_max_async_tokens) {
		pr_err("%s: Passed token is out of range, token %d\n",
				__func__, token);
		return -EINVAL;
	}

	spin_lock_irqsave(&opal_async_comp_lock, flags);
	switch (opal_async_tokens[token].state) {
	case ASYNC_TOKEN_COMPLETED:
	case ASYNC_TOKEN_ALLOCATED:
		opal_async_tokens[token].state = ASYNC_TOKEN_UNALLOCATED;
		rc = 0;
		break;
	/*
	 * DISPATCHED and ABANDONED tokens must wait for OPAL to respond.
	 * Mark a DISPATCHED token as ABANDONED so that the response handling
	 * code knows no one cares and that it can free it then.
	 */
	case ASYNC_TOKEN_DISPATCHED:
		opal_async_tokens[token].state = ASYNC_TOKEN_ABANDONED;
		fallthrough;
	default:
		rc = 1;
	}
	spin_unlock_irqrestore(&opal_async_comp_lock, flags);

	return rc;
}
115
116int opal_async_release_token(int token)
117{
118	int ret;
119
120	ret = __opal_async_release_token(token);
121	if (!ret)
122		up(&opal_async_sem);
123
124	return ret;
 
 
125}
126EXPORT_SYMBOL_GPL(opal_async_release_token);
127
128int opal_async_wait_response(uint64_t token, struct opal_msg *msg)
129{
130	if (token >= opal_max_async_tokens) {
131		pr_err("%s: Invalid token passed\n", __func__);
132		return -EINVAL;
133	}
134
135	if (!msg) {
136		pr_err("%s: Invalid message pointer passed\n", __func__);
137		return -EINVAL;
138	}
139
140	/*
141	 * There is no need to mark the token as dispatched, wait_event()
142	 * will block until the token completes.
143	 *
144	 * Wakeup the poller before we wait for events to speed things
145	 * up on platforms or simulators where the interrupts aren't
146	 * functional.
147	 */
148	opal_wake_poller();
149	wait_event(opal_async_wait, opal_async_tokens[token].state
150			== ASYNC_TOKEN_COMPLETED);
151	memcpy(msg, &opal_async_tokens[token].response, sizeof(*msg));
152
153	return 0;
154}
155EXPORT_SYMBOL_GPL(opal_async_wait_response);
156
/*
 * Interruptible variant of opal_async_wait_response(): copies the OPAL
 * response into *msg and returns 0 on completion, -EINVAL on bad
 * arguments, or the non-zero result of wait_event_interruptible() if a
 * signal interrupted the wait — in which case the token stays live and
 * must still be waited on again or released.
 */
int opal_async_wait_response_interruptible(uint64_t token, struct opal_msg *msg)
{
	unsigned long flags;
	int ret;

	if (token >= opal_max_async_tokens) {
		pr_err("%s: Invalid token passed\n", __func__);
		return -EINVAL;
	}

	if (!msg) {
		pr_err("%s: Invalid message pointer passed\n", __func__);
		return -EINVAL;
	}

	/*
	 * The first time this gets called we mark the token as DISPATCHED
	 * so that if wait_event_interruptible() returns not zero and the
	 * caller frees the token, we know not to actually free the token
	 * until the response comes.
	 *
	 * Only change if the token is ALLOCATED - it may have been
	 * completed even before the caller gets around to calling this
	 * the first time.
	 *
	 * There is also a dirty great comment at the token allocation
	 * function that if the opal call returns OPAL_ASYNC_COMPLETION to
	 * the caller then the caller *must* call this or the not
	 * interruptible version before doing anything else with the
	 * token.
	 */
	if (opal_async_tokens[token].state == ASYNC_TOKEN_ALLOCATED) {
		/* Unlocked peek; the locked re-check below decides for real */
		spin_lock_irqsave(&opal_async_comp_lock, flags);
		if (opal_async_tokens[token].state == ASYNC_TOKEN_ALLOCATED)
			opal_async_tokens[token].state = ASYNC_TOKEN_DISPATCHED;
		spin_unlock_irqrestore(&opal_async_comp_lock, flags);
	}

	/*
	 * Wakeup the poller before we wait for events to speed things
	 * up on platforms or simulators where the interrupts aren't
	 * functional.
	 */
	opal_wake_poller();
	ret = wait_event_interruptible(opal_async_wait,
			opal_async_tokens[token].state ==
			ASYNC_TOKEN_COMPLETED);
	if (!ret)
		memcpy(msg, &opal_async_tokens[token].response, sizeof(*msg));

	return ret;
}
EXPORT_SYMBOL_GPL(opal_async_wait_response_interruptible);
210
211/* Called from interrupt context */
212static int opal_async_comp_event(struct notifier_block *nb,
213		unsigned long msg_type, void *msg)
214{
215	struct opal_msg *comp_msg = msg;
216	enum opal_async_token_state state;
217	unsigned long flags;
218	uint64_t token;
219
220	if (msg_type != OPAL_MSG_ASYNC_COMP)
221		return 0;
222
223	token = be64_to_cpu(comp_msg->params[0]);
 
224	spin_lock_irqsave(&opal_async_comp_lock, flags);
225	state = opal_async_tokens[token].state;
226	opal_async_tokens[token].state = ASYNC_TOKEN_COMPLETED;
227	spin_unlock_irqrestore(&opal_async_comp_lock, flags);
228
229	if (state == ASYNC_TOKEN_ABANDONED) {
230		/* Free the token, no one else will */
231		opal_async_release_token(token);
232		return 0;
233	}
234	memcpy(&opal_async_tokens[token].response, comp_msg, sizeof(*comp_msg));
235	wake_up(&opal_async_wait);
236
237	return 0;
238}
239
/* Receives OPAL_MSG_ASYNC_COMP messages from the OPAL message layer */
static struct notifier_block opal_async_comp_nb = {
		.notifier_call	= opal_async_comp_event,
		.next		= NULL,
		.priority	= 0,
};
245
/*
 * Set up the async completion machinery: size the token table from the
 * "opal-msg-async-num" device-tree property, allocate it, register for
 * OPAL async completion messages and initialise the counting semaphore.
 *
 * Returns 0 on success or a negative errno.
 */
int __init opal_async_comp_init(void)
{
	struct device_node *opal_node;
	const __be32 *async;
	int err;

	opal_node = of_find_node_by_path("/ibm,opal");
	if (!opal_node) {
		pr_err("%s: Opal node not found\n", __func__);
		err = -ENOENT;
		goto out;
	}

	async = of_get_property(opal_node, "opal-msg-async-num", NULL);
	if (!async) {
		pr_err("%s: %pOF has no opal-msg-async-num\n",
				__func__, opal_node);
		err = -ENOENT;
		goto out_opal_node;
	}

	/* Tokens must exist before the notifier can deliver completions */
	opal_max_async_tokens = be32_to_cpup(async);
	opal_async_tokens = kcalloc(opal_max_async_tokens,
			sizeof(*opal_async_tokens), GFP_KERNEL);
	if (!opal_async_tokens) {
		err = -ENOMEM;
		goto out_opal_node;
	}

	err = opal_message_notifier_register(OPAL_MSG_ASYNC_COMP,
			&opal_async_comp_nb);
	if (err) {
		pr_err("%s: Can't register OPAL event notifier (%d)\n",
				__func__, err);
		kfree(opal_async_tokens);
		goto out_opal_node;
	}

	sema_init(&opal_async_sem, opal_max_async_tokens);

	/* err is 0 here; fall through to drop the device-node reference */
out_opal_node:
	of_node_put(opal_node);
out:
	return err;
}
v4.6 (earlier revision of the same file, shown below for comparison)
 
  1/*
  2 * PowerNV OPAL asynchronous completion interfaces
  3 *
  4 * Copyright 2013 IBM Corp.
  5 *
  6 * This program is free software; you can redistribute it and/or
  7 * modify it under the terms of the GNU General Public License
  8 * as published by the Free Software Foundation; either version
  9 * 2 of the License, or (at your option) any later version.
 10 */
 11
 12#undef DEBUG
 13
 14#include <linux/kernel.h>
 15#include <linux/init.h>
 16#include <linux/slab.h>
 17#include <linux/sched.h>
 18#include <linux/semaphore.h>
 19#include <linux/spinlock.h>
 20#include <linux/wait.h>
 21#include <linux/gfp.h>
 22#include <linux/of.h>
 23#include <asm/machdep.h>
 24#include <asm/opal.h>
 25
/* Upper bound on concurrently tracked async completions */
#define N_ASYNC_COMPLETIONS	64

/* Bit set = slot is free / OPAL's response for that token has arrived */
static DECLARE_BITMAP(opal_async_complete_map, N_ASYNC_COMPLETIONS) = {~0UL};
/* Bit set = token is currently handed out to a caller */
static DECLARE_BITMAP(opal_async_token_map, N_ASYNC_COMPLETIONS);
/* Waiters in opal_async_wait_response() sleep here */
static DECLARE_WAIT_QUEUE_HEAD(opal_async_wait);
/* Protects both bitmaps */
static DEFINE_SPINLOCK(opal_async_comp_lock);
/* Counts available tokens */
static struct semaphore opal_async_sem;
/* One buffered OPAL response per token */
static struct opal_msg *opal_async_responses;
/* Actual token count: min(opal-msg-async-num, N_ASYNC_COMPLETIONS) */
static unsigned int opal_max_async_tokens;
 
 35
/*
 * Claim a free async token.  Returns the token number, or -EBUSY when
 * none are available.
 */
int __opal_async_get_token(void)
{
	unsigned long flags;
	int token;

	spin_lock_irqsave(&opal_async_comp_lock, flags);
	/* A set bit in complete_map means the slot is free to hand out */
	token = find_first_bit(opal_async_complete_map, opal_max_async_tokens);
	if (token >= opal_max_async_tokens) {
		token = -EBUSY;
		goto out;
	}

	/* Slot already marked in use: bail rather than hand it out twice */
	if (__test_and_set_bit(token, opal_async_token_map)) {
		token = -EBUSY;
		goto out;
	}

	/* Outstanding now; the completion handler will set this bit again */
	__clear_bit(token, opal_async_complete_map);

out:
	spin_unlock_irqrestore(&opal_async_comp_lock, flags);
	return token;
}
 59
 
 
 
 
 
 
/*
 * Allocate an async token, sleeping interruptibly while none are free.
 * Returns a token >= 0, -ERESTARTSYS if interrupted, or -EBUSY if the
 * bitmap scan fails despite the semaphore count.
 */
int opal_async_get_token_interruptible(void)
{
	int token;

	/* Wait until a token is available */
	if (down_interruptible(&opal_async_sem))
		return -ERESTARTSYS;

	token = __opal_async_get_token();
	/* Allocation failed anyway: give the semaphore count back */
	if (token < 0)
		up(&opal_async_sem);

	return token;
}
EXPORT_SYMBOL_GPL(opal_async_get_token_interruptible);
 75
/*
 * Return a token to the pool: mark its slot free and clear the in-use
 * bit.  Returns 0 on success, -EINVAL for an out-of-range token.
 */
int __opal_async_release_token(int token)
{
	unsigned long flags;

	if (token < 0 || token >= opal_max_async_tokens) {
		pr_err("%s: Passed token is out of range, token %d\n",
				__func__, token);
		return -EINVAL;
	}

	spin_lock_irqsave(&opal_async_comp_lock, flags);
	__set_bit(token, opal_async_complete_map);
	__clear_bit(token, opal_async_token_map);
	spin_unlock_irqrestore(&opal_async_comp_lock, flags);

	return 0;
}
 93
/*
 * Public token release: frees the token and bumps the semaphore so
 * another caller can allocate one.
 */
int opal_async_release_token(int token)
{
	int ret;

	ret = __opal_async_release_token(token);
	if (ret)
		return ret;

	up(&opal_async_sem);

	return 0;
}
EXPORT_SYMBOL_GPL(opal_async_release_token);
107
/*
 * Sleep (uninterruptibly) until OPAL's completion for @token arrives,
 * then copy the response into *msg.  Returns 0, or -EINVAL on bad args.
 */
int opal_async_wait_response(uint64_t token, struct opal_msg *msg)
{
	if (token >= opal_max_async_tokens) {
		pr_err("%s: Invalid token passed\n", __func__);
		return -EINVAL;
	}

	if (!msg) {
		pr_err("%s: Invalid message pointer passed\n", __func__);
		return -EINVAL;
	}

	/* The completion handler sets the bit and wakes us */
	wait_event(opal_async_wait, test_bit(token, opal_async_complete_map));
	memcpy(msg, &opal_async_responses[token], sizeof(*msg));

	return 0;
}
EXPORT_SYMBOL_GPL(opal_async_wait_response);
126
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
/*
 * OPAL message notifier callback: stashes the completion message for
 * its token, marks the token complete and wakes any waiters.
 */
static int opal_async_comp_event(struct notifier_block *nb,
		unsigned long msg_type, void *msg)
{
	struct opal_msg *comp_msg = msg;
	unsigned long flags;
	uint64_t token;

	if (msg_type != OPAL_MSG_ASYNC_COMP)
		return 0;

	/*
	 * NOTE(review): token comes from firmware and is not range-checked
	 * before indexing opal_async_responses — confirm OPAL is trusted
	 * to only echo back tokens we issued.
	 */
	token = be64_to_cpu(comp_msg->params[0]);
	memcpy(&opal_async_responses[token], comp_msg, sizeof(*comp_msg));
	spin_lock_irqsave(&opal_async_comp_lock, flags);
	__set_bit(token, opal_async_complete_map);
	spin_unlock_irqrestore(&opal_async_comp_lock, flags);

	wake_up(&opal_async_wait);

	return 0;
}
147
/* Receives OPAL_MSG_ASYNC_COMP messages from the OPAL message layer */
static struct notifier_block opal_async_comp_nb = {
		.notifier_call	= opal_async_comp_event,
		.next		= NULL,
		.priority	= 0,
};
153
154int __init opal_async_comp_init(void)
155{
156	struct device_node *opal_node;
157	const __be32 *async;
158	int err;
159
160	opal_node = of_find_node_by_path("/ibm,opal");
161	if (!opal_node) {
162		pr_err("%s: Opal node not found\n", __func__);
163		err = -ENOENT;
164		goto out;
165	}
166
167	async = of_get_property(opal_node, "opal-msg-async-num", NULL);
168	if (!async) {
169		pr_err("%s: %s has no opal-msg-async-num\n",
170				__func__, opal_node->full_name);
171		err = -ENOENT;
172		goto out_opal_node;
173	}
174
175	opal_max_async_tokens = be32_to_cpup(async);
176	if (opal_max_async_tokens > N_ASYNC_COMPLETIONS)
177		opal_max_async_tokens = N_ASYNC_COMPLETIONS;
 
 
 
 
178
179	err = opal_message_notifier_register(OPAL_MSG_ASYNC_COMP,
180			&opal_async_comp_nb);
181	if (err) {
182		pr_err("%s: Can't register OPAL event notifier (%d)\n",
183				__func__, err);
 
184		goto out_opal_node;
185	}
186
187	opal_async_responses = kzalloc(
188			sizeof(*opal_async_responses) * opal_max_async_tokens,
189			GFP_KERNEL);
190	if (!opal_async_responses) {
191		pr_err("%s: Out of memory, failed to do asynchronous "
192				"completion init\n", __func__);
193		err = -ENOMEM;
194		goto out_opal_node;
195	}
196
197	/* Initialize to 1 less than the maximum tokens available, as we may
198	 * require to pop one during emergency through synchronous call to
199	 * __opal_async_get_token()
200	 */
201	sema_init(&opal_async_sem, opal_max_async_tokens - 1);
202
203out_opal_node:
204	of_node_put(opal_node);
205out:
206	return err;
207}