// SPDX-License-Identifier: GPL-2.0
/*
 * Hardware spinlock framework
 *
 * Copyright (C) 2010 Texas Instruments Incorporated - http://www.ti.com
 *
 * Contact: Ohad Ben-Cohen <ohad@wizery.com>
 */

#define pr_fmt(fmt) "%s: " fmt, __func__

#include <linux/delay.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/types.h>
#include <linux/err.h>
#include <linux/jiffies.h>
#include <linux/radix-tree.h>
#include <linux/hwspinlock.h>
#include <linux/pm_runtime.h>
#include <linux/mutex.h>
#include <linux/of.h>

#include "hwspinlock_internal.h"

/* retry delay used in atomic context */
#define HWSPINLOCK_RETRY_DELAY_US 100

/* radix tree tags */
#define HWSPINLOCK_UNUSED (0) /* tags an hwspinlock as unused */

/*
 * A radix tree is used to maintain the available hwspinlock instances.
 * The tree associates hwspinlock pointers with their integer key id,
 * and provides easy-to-use API which makes the hwspinlock core code simple
 * and easy to read.
 *
 * Radix trees are quick on lookups, and reasonably efficient in terms of
 * storage, especially with high density usages such as this framework
 * requires (a continuous range of integer keys, beginning with zero, is
 * used as the ID's of the hwspinlock instances).
 *
 * The radix tree API supports tagging items in the tree, which this
 * framework uses to mark unused hwspinlock instances (see the
 * HWSPINLOCK_UNUSED tag above). As a result, the process of querying the
 * tree, looking for an unused hwspinlock instance, is now reduced to a
 * single radix tree API call.
 */
static RADIX_TREE(hwspinlock_tree, GFP_KERNEL);

/*
 * Synchronization of access to the tree is achieved using this mutex,
 * as the radix-tree API requires that users provide all synchronisation.
 * A mutex is needed because we're using non-atomic radix tree allocations.
 */
static DEFINE_MUTEX(hwspinlock_tree_lock);
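
/*
 * For illustration only: finding one unused lock is a single tagged
 * lookup (this is exactly what hwspin_lock_request() below does). This
 * fragment is a sketch and is compiled out.
 */
#if 0
	struct hwspinlock *free_lock;
	int found = radix_tree_gang_lookup_tag(&hwspinlock_tree,
					       (void **)&free_lock, 0, 1,
					       HWSPINLOCK_UNUSED);
	/* found == 1 means free_lock now points at an unused instance */
#endif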


/**
 * __hwspin_trylock() - attempt to lock a specific hwspinlock
 * @hwlock: an hwspinlock which we want to trylock
 * @mode: controls whether local interrupts are disabled or not
 * @flags: a pointer where the caller's interrupt state will be saved at (if
 *         requested)
 *
 * This function attempts to lock an hwspinlock, and will immediately
 * fail if the hwspinlock is already taken.
 *
 * Caution: If the mode is HWLOCK_RAW, the caller must itself serialize the
 * routine that takes the hardware lock, using a mutex or a spinlock. In some
 * scenarios the caller performs time-consuming or sleepable operations under
 * the hardware lock, and therefore needs a sleepable lock (such as a mutex)
 * to protect those operations.
 *
 * If the mode is neither HWLOCK_IN_ATOMIC nor HWLOCK_RAW, upon a successful
 * return from this function, preemption (and possibly interrupts) is disabled,
 * so the caller must not sleep, and is advised to release the hwspinlock as
 * soon as possible. This is required in order to minimize remote cores polling
 * on the hardware interconnect.
 *
 * The user decides whether local interrupts are disabled or not, and if yes,
 * whether they want their previous state to be saved. It is up to the user
 * to choose the appropriate @mode of operation, exactly the same way users
 * should decide between spin_trylock, spin_trylock_irq and
 * spin_trylock_irqsave.
 *
 * Returns: %0 if we successfully locked the hwspinlock or -EBUSY if
 * the hwspinlock was already taken.
 *
 * This function will never sleep.
 */
int __hwspin_trylock(struct hwspinlock *hwlock, int mode, unsigned long *flags)
{
	int ret;

	if (WARN_ON(!hwlock || (!flags && mode == HWLOCK_IRQSTATE)))
		return -EINVAL;

	/*
	 * This spin_lock{_irq, _irqsave} serves three purposes:
	 *
	 * 1. Disable preemption, in order to minimize the period of time
	 *    in which the hwspinlock is taken. This is important in order
	 *    to minimize the possible polling on the hardware interconnect
	 *    by a remote user of this lock.
	 * 2. Make the hwspinlock SMP-safe (so we can take it from
	 *    additional contexts on the local host).
	 * 3. Ensure that in_atomic/might_sleep checks catch potential
	 *    problems with hwspinlock usage (e.g. scheduler checks like
	 *    'scheduling while atomic' etc.)
	 */
	switch (mode) {
	case HWLOCK_IRQSTATE:
		ret = spin_trylock_irqsave(&hwlock->lock, *flags);
		break;
	case HWLOCK_IRQ:
		ret = spin_trylock_irq(&hwlock->lock);
		break;
	case HWLOCK_RAW:
	case HWLOCK_IN_ATOMIC:
		ret = 1;
		break;
	default:
		ret = spin_trylock(&hwlock->lock);
		break;
	}

	/* is lock already taken by another context on the local cpu? */
	if (!ret)
		return -EBUSY;

	/* try to take the hwspinlock device */
	ret = hwlock->bank->ops->trylock(hwlock);

	/* if hwlock is already taken, undo spin_trylock_* and exit */
	if (!ret) {
		switch (mode) {
		case HWLOCK_IRQSTATE:
			spin_unlock_irqrestore(&hwlock->lock, *flags);
			break;
		case HWLOCK_IRQ:
			spin_unlock_irq(&hwlock->lock);
			break;
		case HWLOCK_RAW:
		case HWLOCK_IN_ATOMIC:
			/* Nothing to do */
			break;
		default:
			spin_unlock(&hwlock->lock);
			break;
		}

		return -EBUSY;
	}

	/*
	 * We can be sure the other core's memory operations
	 * are observable to us only _after_ we successfully take
	 * the hwspinlock, and we must make sure that subsequent memory
	 * operations (both reads and writes) will not be reordered before
	 * we actually took the hwspinlock.
	 *
	 * Note: the implicit memory barrier of the spinlock above is too
	 * early, so we need this additional explicit memory barrier.
	 */
	mb();

	return 0;
}
EXPORT_SYMBOL_GPL(__hwspin_trylock);
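
/*
 * Usage sketch (illustrative only, compiled out): a caller that must not
 * spin for long can fail fast with the hwspin_trylock_irqsave() wrapper
 * from <linux/hwspinlock.h>. The lock pointer and the critical section
 * below are hypothetical.
 */
#if 0
static int example_try_update(struct hwspinlock *hwlock)
{
	unsigned long flags;
	int ret;

	/* returns -EBUSY immediately if a remote core holds the lock */
	ret = hwspin_trylock_irqsave(hwlock, &flags);
	if (ret)
		return ret;

	/* ... short, non-sleeping access to the shared resource ... */

	hwspin_unlock_irqrestore(hwlock, &flags);
	return 0;
}
#endif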

/**
 * __hwspin_lock_timeout() - lock an hwspinlock with timeout limit
 * @hwlock: the hwspinlock to be locked
 * @to: timeout value in msecs
 * @mode: mode which controls whether local interrupts are disabled or not
 * @flags: a pointer to where the caller's interrupt state will be saved at (if
 *         requested)
 *
 * This function locks the given @hwlock. If the @hwlock
 * is already taken, the function will busy loop waiting for it to
 * be released, but give up after @to msecs have elapsed.
 *
 * Caution: If the mode is HWLOCK_RAW, the caller must itself serialize the
 * routine that takes the hardware lock, using a mutex or a spinlock. In some
 * scenarios the caller performs time-consuming or sleepable operations under
 * the hardware lock, and therefore needs a sleepable lock (such as a mutex)
 * to protect those operations.
 *
 * If the mode is HWLOCK_IN_ATOMIC (called from an atomic context) the timeout
 * is handled with busy-waiting delays, and hence should not exceed a few
 * msecs.
 *
 * If the mode is neither HWLOCK_IN_ATOMIC nor HWLOCK_RAW, upon a successful
 * return from this function, preemption (and possibly interrupts) is disabled,
 * so the caller must not sleep, and is advised to release the hwspinlock as
 * soon as possible. This is required in order to minimize remote cores polling
 * on the hardware interconnect.
 *
 * The user decides whether local interrupts are disabled or not, and if yes,
 * whether they want their previous state to be saved. It is up to the user
 * to choose the appropriate @mode of operation, exactly the same way users
 * should decide between spin_lock, spin_lock_irq and spin_lock_irqsave.
 *
 * Returns: %0 when the @hwlock was successfully taken, and an appropriate
 * error code otherwise (most notably -ETIMEDOUT if the @hwlock is still
 * busy after @to msecs).
 *
 * The function will never sleep.
 */
int __hwspin_lock_timeout(struct hwspinlock *hwlock, unsigned int to,
			  int mode, unsigned long *flags)
{
	int ret;
	unsigned long expire, atomic_delay = 0;

	expire = msecs_to_jiffies(to) + jiffies;

	for (;;) {
		/* Try to take the hwspinlock */
		ret = __hwspin_trylock(hwlock, mode, flags);
		if (ret != -EBUSY)
			break;

		/*
		 * The lock is already taken, let's check if the user wants
		 * us to try again
		 */
		if (mode == HWLOCK_IN_ATOMIC) {
			udelay(HWSPINLOCK_RETRY_DELAY_US);
			atomic_delay += HWSPINLOCK_RETRY_DELAY_US;
			if (atomic_delay > to * 1000)
				return -ETIMEDOUT;
		} else {
			if (time_is_before_eq_jiffies(expire))
				return -ETIMEDOUT;
		}

		/*
		 * Allow platform-specific relax handlers to prevent
		 * hogging the interconnect (no sleeping, though)
		 */
		if (hwlock->bank->ops->relax)
			hwlock->bank->ops->relax(hwlock);
	}

	return ret;
}
EXPORT_SYMBOL_GPL(__hwspin_lock_timeout);
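
/*
 * Usage sketch (illustrative only, compiled out): bounded waiting via
 * the hwspin_lock_timeout_irq() wrapper from <linux/hwspinlock.h>. The
 * 100 msecs budget is an arbitrary example value.
 */
#if 0
static int example_bounded_update(struct hwspinlock *hwlock)
{
	int ret;

	/* busy-wait with local interrupts disabled, up to 100 msecs */
	ret = hwspin_lock_timeout_irq(hwlock, 100);
	if (ret)
		return ret;	/* most notably -ETIMEDOUT */

	/* ... short, non-sleeping access to the shared resource ... */

	hwspin_unlock_irq(hwlock);
	return 0;
}
#endif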

/**
 * __hwspin_unlock() - unlock a specific hwspinlock
 * @hwlock: a previously-acquired hwspinlock which we want to unlock
 * @mode: controls whether local interrupts need to be restored or not
 * @flags: previous caller's interrupt state to restore (if requested)
 *
 * This function will unlock a specific hwspinlock, enable preemption and
 * (possibly) enable interrupts or restore their previous state.
 * @hwlock must be already locked before calling this function: it is a bug
 * to call unlock on a @hwlock that is already unlocked.
 *
 * The user decides whether local interrupts should be enabled or not, and
 * if yes, whether they want their previous state to be restored. It is up
 * to the user to choose the appropriate @mode of operation, exactly the
 * same way users decide between spin_unlock, spin_unlock_irq and
 * spin_unlock_irqrestore.
 *
 * The function will never sleep.
 */
void __hwspin_unlock(struct hwspinlock *hwlock, int mode, unsigned long *flags)
{
	if (WARN_ON(!hwlock || (!flags && mode == HWLOCK_IRQSTATE)))
		return;

	/*
	 * We must make sure that memory operations (both reads and writes),
	 * done before unlocking the hwspinlock, will not be reordered
	 * after the lock is released.
	 *
	 * That's the purpose of this explicit memory barrier.
	 *
	 * Note: the memory barrier induced by the spin_unlock below is too
	 * late; the other core is going to access memory soon after it will
	 * take the hwspinlock, and by then we want to be sure our memory
	 * operations are already observable.
	 */
	mb();

	hwlock->bank->ops->unlock(hwlock);

	/* Undo the spin_trylock{_irq, _irqsave} called while locking */
	switch (mode) {
	case HWLOCK_IRQSTATE:
		spin_unlock_irqrestore(&hwlock->lock, *flags);
		break;
	case HWLOCK_IRQ:
		spin_unlock_irq(&hwlock->lock);
		break;
	case HWLOCK_RAW:
	case HWLOCK_IN_ATOMIC:
		/* Nothing to do */
		break;
	default:
		spin_unlock(&hwlock->lock);
		break;
	}
}
EXPORT_SYMBOL_GPL(__hwspin_unlock);

/**
 * hwspin_lock_bust() - bust a specific hwspinlock
 * @hwlock: a previously-acquired hwspinlock which we want to bust
 * @id: identifier of the remote lock holder, if applicable
 *
 * This function will bust a hwspinlock that was previously acquired as
 * long as the current owner of the lock matches the id given by the caller.
 *
 * Context: Process context.
 *
 * Returns: 0 on success, or -EINVAL if the hwspinlock does not exist, or
 * the bust operation fails, and -EOPNOTSUPP if the bust operation is not
 * defined for the hwspinlock.
 */
int hwspin_lock_bust(struct hwspinlock *hwlock, unsigned int id)
{
	if (WARN_ON(!hwlock))
		return -EINVAL;

	if (!hwlock->bank->ops->bust) {
		pr_err("bust operation not defined\n");
		return -EOPNOTSUPP;
	}

	return hwlock->bank->ops->bust(hwlock, id);
}
EXPORT_SYMBOL_GPL(hwspin_lock_bust);
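
/*
 * Usage sketch (illustrative only, compiled out): recovering a lock left
 * held by a crashed remote core. This works only if the underlying driver
 * implements the .bust op; the owner id value is hypothetical.
 */
#if 0
static void example_recover_lock(struct hwspinlock *hwlock,
				 unsigned int remote_owner_id)
{
	int ret = hwspin_lock_bust(hwlock, remote_owner_id);

	if (ret)
		pr_warn("failed to bust hwspinlock (%d)\n", ret);
}
#endif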

/**
 * of_hwspin_lock_simple_xlate - translate hwlock_spec to return a lock id
 * @hwlock_spec: hwlock specifier as found in the device tree
 *
 * This is a simple translation function, suitable for hwspinlock platform
 * drivers that only have a lock specifier length of 1.
 *
 * Returns: a relative index of the lock within a specified bank on success,
 * or -EINVAL on invalid specifier cell count.
 */
static inline int
of_hwspin_lock_simple_xlate(const struct of_phandle_args *hwlock_spec)
{
	if (WARN_ON(hwlock_spec->args_count != 1))
		return -EINVAL;

	return hwlock_spec->args[0];
}

/**
 * of_hwspin_lock_get_id() - get lock id for an OF phandle-based specific lock
 * @np: device node from which to request the specific hwlock
 * @index: index of the hwlock in the list of values
 *
 * This function provides a means for DT users of the hwspinlock module to
 * get the global lock id of a specific hwspinlock using the phandle of the
 * hwspinlock device, so that it can be requested using the normal
 * hwspin_lock_request_specific() API.
 *
 * Returns: the global lock id number on success, -EPROBE_DEFER if the
 * hwspinlock device is not yet registered, -EINVAL on invalid args
 * specifier value or an appropriate error as returned from the OF parsing
 * of the DT client node.
 */
int of_hwspin_lock_get_id(struct device_node *np, int index)
{
	struct of_phandle_args args;
	struct hwspinlock *hwlock;
	struct radix_tree_iter iter;
	void **slot;
	int id;
	int ret;

	ret = of_parse_phandle_with_args(np, "hwlocks", "#hwlock-cells", index,
					 &args);
	if (ret)
		return ret;

	if (!of_device_is_available(args.np)) {
		ret = -ENOENT;
		goto out;
	}

	/* Find the hwspinlock device: we need its base_id */
	ret = -EPROBE_DEFER;
	rcu_read_lock();
	radix_tree_for_each_slot(slot, &hwspinlock_tree, &iter, 0) {
		hwlock = radix_tree_deref_slot(slot);
		if (unlikely(!hwlock))
			continue;
		if (radix_tree_deref_retry(hwlock)) {
			slot = radix_tree_iter_retry(&iter);
			continue;
		}

		if (device_match_of_node(hwlock->bank->dev, args.np)) {
			ret = 0;
			break;
		}
	}
	rcu_read_unlock();
	if (ret < 0)
		goto out;

	id = of_hwspin_lock_simple_xlate(&args);
	if (id < 0 || id >= hwlock->bank->num_locks) {
		ret = -EINVAL;
		goto out;
	}
	id += hwlock->bank->base_id;

out:
	of_node_put(args.np);
	return ret ? ret : id;
}
EXPORT_SYMBOL_GPL(of_hwspin_lock_get_id);

/**
 * of_hwspin_lock_get_id_byname() - get lock id for a specified hwlock name
 * @np: device node from which to request the specific hwlock
 * @name: hwlock name
 *
 * This function provides a means for DT users of the hwspinlock module to
 * get the global lock id of a specific hwspinlock using the specified name of
 * the hwspinlock device, so that it can be requested using the normal
 * hwspin_lock_request_specific() API.
 *
 * Returns: the global lock id number on success, -EPROBE_DEFER if the
 * hwspinlock device is not yet registered, -EINVAL on invalid args
 * specifier value or an appropriate error as returned from the OF parsing
 * of the DT client node.
 */
int of_hwspin_lock_get_id_byname(struct device_node *np, const char *name)
{
	int index;

	if (!name)
		return -EINVAL;

	index = of_property_match_string(np, "hwlock-names", name);
	if (index < 0)
		return index;

	return of_hwspin_lock_get_id(np, index);
}
EXPORT_SYMBOL_GPL(of_hwspin_lock_get_id_byname);
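
/*
 * Usage sketch (illustrative only, compiled out): a DT client node such as
 *
 *	client {
 *		hwlocks = <&hwlock_dev 2>;
 *		hwlock-names = "rpc";
 *	};
 *
 * could resolve and claim its lock as below; the node layout and the
 * "rpc" name are hypothetical.
 */
#if 0
static struct hwspinlock *example_get_dt_lock(struct device *dev)
{
	int id;

	id = of_hwspin_lock_get_id_byname(dev->of_node, "rpc");
	if (id < 0)
		return NULL;	/* may be -EPROBE_DEFER; propagate in probe */

	return devm_hwspin_lock_request_specific(dev, id);
}
#endif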

static int hwspin_lock_register_single(struct hwspinlock *hwlock, int id)
{
	struct hwspinlock *tmp;
	int ret;

	mutex_lock(&hwspinlock_tree_lock);

	ret = radix_tree_insert(&hwspinlock_tree, id, hwlock);
	if (ret) {
		if (ret == -EEXIST)
			pr_err("hwspinlock id %d already exists!\n", id);
		goto out;
	}

	/* mark this hwspinlock as available */
	tmp = radix_tree_tag_set(&hwspinlock_tree, id, HWSPINLOCK_UNUSED);

	/* self-sanity check which should never fail */
	WARN_ON(tmp != hwlock);

out:
	mutex_unlock(&hwspinlock_tree_lock);
	/* propagate a radix_tree_insert() failure to the caller */
	return ret;
}

static struct hwspinlock *hwspin_lock_unregister_single(unsigned int id)
{
	struct hwspinlock *hwlock = NULL;
	int ret;

	mutex_lock(&hwspinlock_tree_lock);

	/* make sure the hwspinlock is not in use (tag is set) */
	ret = radix_tree_tag_get(&hwspinlock_tree, id, HWSPINLOCK_UNUSED);
	if (ret == 0) {
		pr_err("hwspinlock %d still in use (or not present)\n", id);
		goto out;
	}

	hwlock = radix_tree_delete(&hwspinlock_tree, id);
	if (!hwlock) {
		pr_err("failed to delete hwspinlock %d\n", id);
		goto out;
	}

out:
	mutex_unlock(&hwspinlock_tree_lock);
	return hwlock;
}

/**
 * hwspin_lock_register() - register a new hw spinlock device
 * @bank: the hwspinlock device, which usually provides numerous hw locks
 * @dev: the backing device
 * @ops: hwspinlock handlers for this device
 * @base_id: id of the first hardware spinlock in this bank
 * @num_locks: number of hwspinlocks provided by this device
 *
 * This function should be called from the underlying platform-specific
 * implementation, to register a new hwspinlock device instance.
 *
 * Should be called from a process context (might sleep)
 *
 * Returns: %0 on success, or an appropriate error code on failure
 */
int hwspin_lock_register(struct hwspinlock_device *bank, struct device *dev,
		const struct hwspinlock_ops *ops, int base_id, int num_locks)
{
	struct hwspinlock *hwlock;
	int ret = 0, i;

	if (!bank || !ops || !dev || !num_locks || !ops->trylock ||
	    !ops->unlock) {
		pr_err("invalid parameters\n");
		return -EINVAL;
	}

	bank->dev = dev;
	bank->ops = ops;
	bank->base_id = base_id;
	bank->num_locks = num_locks;

	for (i = 0; i < num_locks; i++) {
		hwlock = &bank->lock[i];

		spin_lock_init(&hwlock->lock);
		hwlock->bank = bank;

		ret = hwspin_lock_register_single(hwlock, base_id + i);
		if (ret)
			goto reg_failed;
	}

	return 0;

reg_failed:
	while (--i >= 0)
		hwspin_lock_unregister_single(base_id + i);
	return ret;
}
EXPORT_SYMBOL_GPL(hwspin_lock_register);
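
/*
 * Registration sketch (illustrative only, compiled out): a platform driver
 * providing a bank of 8 locks. The ops callbacks are hypothetical; note
 * that struct hwspinlock_device ends in a flexible array of locks, so the
 * bank must be allocated with room for all of them.
 */
#if 0
static int example_hwspinlock_probe(struct platform_device *pdev)
{
	static const struct hwspinlock_ops example_ops = {
		.trylock = example_trylock,	/* hypothetical callback */
		.unlock	 = example_unlock,	/* hypothetical callback */
	};
	struct hwspinlock_device *bank;

	bank = devm_kzalloc(&pdev->dev, struct_size(bank, lock, 8),
			    GFP_KERNEL);
	if (!bank)
		return -ENOMEM;

	return devm_hwspin_lock_register(&pdev->dev, bank, &example_ops,
					 0, 8);
}
#endif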

/**
 * hwspin_lock_unregister() - unregister an hw spinlock device
 * @bank: the hwspinlock device, which usually provides numerous hw locks
 *
 * This function should be called from the underlying platform-specific
 * implementation, to unregister an existing (and unused) hwspinlock.
 *
 * Should be called from a process context (might sleep)
 *
 * Returns: %0 on success, or an appropriate error code on failure
 */
int hwspin_lock_unregister(struct hwspinlock_device *bank)
{
	struct hwspinlock *hwlock, *tmp;
	int i;

	for (i = 0; i < bank->num_locks; i++) {
		hwlock = &bank->lock[i];

		tmp = hwspin_lock_unregister_single(bank->base_id + i);
		if (!tmp)
			return -EBUSY;

		/* self-sanity check that should never fail */
		WARN_ON(tmp != hwlock);
	}

	return 0;
}
EXPORT_SYMBOL_GPL(hwspin_lock_unregister);

static void devm_hwspin_lock_unreg(struct device *dev, void *res)
{
	hwspin_lock_unregister(*(struct hwspinlock_device **)res);
}

static int devm_hwspin_lock_device_match(struct device *dev, void *res,
					 void *data)
{
	struct hwspinlock_device **bank = res;

	if (WARN_ON(!bank || !*bank))
		return 0;

	return *bank == data;
}

/**
 * devm_hwspin_lock_unregister() - unregister an hw spinlock device for
 *				   a managed device
 * @dev: the backing device
 * @bank: the hwspinlock device, which usually provides numerous hw locks
 *
 * This function should be called from the underlying platform-specific
 * implementation, to unregister an existing (and unused) hwspinlock.
 *
 * Should be called from a process context (might sleep)
 *
 * Returns: %0 on success, or an appropriate error code on failure
 */
int devm_hwspin_lock_unregister(struct device *dev,
				struct hwspinlock_device *bank)
{
	int ret;

	ret = devres_release(dev, devm_hwspin_lock_unreg,
			     devm_hwspin_lock_device_match, bank);
	WARN_ON(ret);

	return ret;
}
EXPORT_SYMBOL_GPL(devm_hwspin_lock_unregister);

/**
 * devm_hwspin_lock_register() - register a new hw spinlock device for
 *				 a managed device
 * @dev: the backing device
 * @bank: the hwspinlock device, which usually provides numerous hw locks
 * @ops: hwspinlock handlers for this device
 * @base_id: id of the first hardware spinlock in this bank
 * @num_locks: number of hwspinlocks provided by this device
 *
 * This function should be called from the underlying platform-specific
 * implementation, to register a new hwspinlock device instance.
 *
 * Should be called from a process context (might sleep)
 *
 * Returns: %0 on success, or an appropriate error code on failure
 */
int devm_hwspin_lock_register(struct device *dev,
			      struct hwspinlock_device *bank,
			      const struct hwspinlock_ops *ops,
			      int base_id, int num_locks)
{
	struct hwspinlock_device **ptr;
	int ret;

	ptr = devres_alloc(devm_hwspin_lock_unreg, sizeof(*ptr), GFP_KERNEL);
	if (!ptr)
		return -ENOMEM;

	ret = hwspin_lock_register(bank, dev, ops, base_id, num_locks);
	if (!ret) {
		*ptr = bank;
		devres_add(dev, ptr);
	} else {
		devres_free(ptr);
	}

	return ret;
}
EXPORT_SYMBOL_GPL(devm_hwspin_lock_register);

/**
 * __hwspin_lock_request() - tag an hwspinlock as used and power it up
 * @hwlock: the target hwspinlock
 *
 * This is an internal function that prepares an hwspinlock instance
 * before it is given to the user. The function assumes that
 * hwspinlock_tree_lock is taken.
 *
 * Returns: %0 or positive to indicate success, and a negative value to
 * indicate an error (with the appropriate error code)
 */
static int __hwspin_lock_request(struct hwspinlock *hwlock)
{
	struct device *dev = hwlock->bank->dev;
	struct hwspinlock *tmp;
	int ret;

	/* prevent underlying implementation from being removed */
	if (!try_module_get(dev->driver->owner)) {
		dev_err(dev, "%s: can't get owner\n", __func__);
		return -EINVAL;
	}

	/* notify PM core that power is now needed */
	ret = pm_runtime_get_sync(dev);
	if (ret < 0 && ret != -EACCES) {
		dev_err(dev, "%s: can't power on device\n", __func__);
		pm_runtime_put_noidle(dev);
		module_put(dev->driver->owner);
		return ret;
	}

	ret = 0;

	/* mark hwspinlock as used, should not fail */
	tmp = radix_tree_tag_clear(&hwspinlock_tree, hwlock_to_id(hwlock),
				   HWSPINLOCK_UNUSED);

	/* self-sanity check that should never fail */
	WARN_ON(tmp != hwlock);

	return ret;
}

/**
 * hwspin_lock_get_id() - retrieve id number of a given hwspinlock
 * @hwlock: a valid hwspinlock instance
 *
 * Returns: the id number of a given @hwlock, or -EINVAL if @hwlock is invalid.
 */
int hwspin_lock_get_id(struct hwspinlock *hwlock)
{
	if (!hwlock) {
		pr_err("invalid hwlock\n");
		return -EINVAL;
	}

	return hwlock_to_id(hwlock);
}
EXPORT_SYMBOL_GPL(hwspin_lock_get_id);

/**
 * hwspin_lock_request() - request an hwspinlock
 *
 * This function should be called by users of the hwspinlock device,
 * in order to dynamically assign them an unused hwspinlock.
 * Usually the user of this lock will then have to communicate the lock's id
 * to the remote core before it can be used for synchronization (to get the
 * id of a given hwlock, use hwspin_lock_get_id()).
 *
 * Should be called from a process context (might sleep)
 *
 * Returns: the address of the assigned hwspinlock, or %NULL on error
 */
struct hwspinlock *hwspin_lock_request(void)
{
	struct hwspinlock *hwlock;
	int ret;

	mutex_lock(&hwspinlock_tree_lock);

	/* look for an unused lock */
	ret = radix_tree_gang_lookup_tag(&hwspinlock_tree, (void **)&hwlock,
					 0, 1, HWSPINLOCK_UNUSED);
	if (ret == 0) {
		pr_warn("a free hwspinlock is not available\n");
		hwlock = NULL;
		goto out;
	}

	/* sanity check that should never fail */
	WARN_ON(ret > 1);

	/* mark as used and power up */
	ret = __hwspin_lock_request(hwlock);
	if (ret < 0)
		hwlock = NULL;

out:
	mutex_unlock(&hwspinlock_tree_lock);
	return hwlock;
}
EXPORT_SYMBOL_GPL(hwspin_lock_request);
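
/*
 * Usage sketch (illustrative only, compiled out): dynamically grabbing a
 * free lock and publishing its id to a remote core; the IPC mechanism for
 * sharing the id is left hypothetical.
 */
#if 0
static struct hwspinlock *example_setup_shared_lock(void)
{
	struct hwspinlock *hwlock;

	hwlock = hwspin_lock_request();
	if (!hwlock)
		return NULL;

	/* ... send hwspin_lock_get_id(hwlock) to the remote core ... */

	return hwlock;
}
#endif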

/**
 * hwspin_lock_request_specific() - request for a specific hwspinlock
 * @id: index of the specific hwspinlock that is requested
 *
 * This function should be called by users of the hwspinlock module,
 * in order to assign them a specific hwspinlock.
 * Usually early board code will be calling this function in order to
 * reserve specific hwspinlock ids for predefined purposes.
 *
 * Should be called from a process context (might sleep)
 *
 * Returns: the address of the assigned hwspinlock, or %NULL on error
 */
struct hwspinlock *hwspin_lock_request_specific(unsigned int id)
{
	struct hwspinlock *hwlock;
	int ret;

	mutex_lock(&hwspinlock_tree_lock);

	/* make sure this hwspinlock exists */
	hwlock = radix_tree_lookup(&hwspinlock_tree, id);
	if (!hwlock) {
		pr_warn("hwspinlock %u does not exist\n", id);
		goto out;
	}

	/* sanity check (this shouldn't happen) */
	WARN_ON(hwlock_to_id(hwlock) != id);

	/* make sure this hwspinlock is unused */
	ret = radix_tree_tag_get(&hwspinlock_tree, id, HWSPINLOCK_UNUSED);
	if (ret == 0) {
		pr_warn("hwspinlock %u is already in use\n", id);
		hwlock = NULL;
		goto out;
	}

	/* mark as used and power up */
	ret = __hwspin_lock_request(hwlock);
	if (ret < 0)
		hwlock = NULL;

out:
	mutex_unlock(&hwspinlock_tree_lock);
	return hwlock;
}
EXPORT_SYMBOL_GPL(hwspin_lock_request_specific);

/**
 * hwspin_lock_free() - free a specific hwspinlock
 * @hwlock: the specific hwspinlock to free
 *
 * This function marks @hwlock as free again.
 * Should only be called with an @hwlock that was retrieved from
 * an earlier call to hwspin_lock_request{_specific}.
 *
 * Should be called from a process context (might sleep)
 *
 * Returns: %0 on success, or an appropriate error code on failure
 */
int hwspin_lock_free(struct hwspinlock *hwlock)
{
	struct device *dev;
	struct hwspinlock *tmp;
	int ret;

	if (!hwlock) {
		pr_err("invalid hwlock\n");
		return -EINVAL;
	}

	dev = hwlock->bank->dev;
	mutex_lock(&hwspinlock_tree_lock);

	/* make sure the hwspinlock is used */
	ret = radix_tree_tag_get(&hwspinlock_tree, hwlock_to_id(hwlock),
				 HWSPINLOCK_UNUSED);
	if (ret == 1) {
		dev_err(dev, "%s: hwlock is already free\n", __func__);
		dump_stack();
		ret = -EINVAL;
		goto out;
	}

	/* notify the underlying device that power is not needed */
	pm_runtime_put(dev);

	/* mark this hwspinlock as available */
	tmp = radix_tree_tag_set(&hwspinlock_tree, hwlock_to_id(hwlock),
				 HWSPINLOCK_UNUSED);

	/* sanity check (this shouldn't happen) */
	WARN_ON(tmp != hwlock);

	module_put(dev->driver->owner);

out:
	mutex_unlock(&hwspinlock_tree_lock);
	return ret;
}
EXPORT_SYMBOL_GPL(hwspin_lock_free);

static int devm_hwspin_lock_match(struct device *dev, void *res, void *data)
{
	struct hwspinlock **hwlock = res;

	if (WARN_ON(!hwlock || !*hwlock))
		return 0;

	return *hwlock == data;
}

static void devm_hwspin_lock_release(struct device *dev, void *res)
{
	hwspin_lock_free(*(struct hwspinlock **)res);
}

/**
 * devm_hwspin_lock_free() - free a specific hwspinlock for a managed device
 * @dev: the device to free the specific hwspinlock
 * @hwlock: the specific hwspinlock to free
 *
 * This function marks @hwlock as free again.
 * Should only be called with an @hwlock that was retrieved from
 * an earlier call to hwspin_lock_request{_specific}.
 *
 * Should be called from a process context (might sleep)
 *
 * Returns: %0 on success, or an appropriate error code on failure
 */
int devm_hwspin_lock_free(struct device *dev, struct hwspinlock *hwlock)
{
	int ret;

	ret = devres_release(dev, devm_hwspin_lock_release,
			     devm_hwspin_lock_match, hwlock);
	WARN_ON(ret);

	return ret;
}
EXPORT_SYMBOL_GPL(devm_hwspin_lock_free);

/**
 * devm_hwspin_lock_request() - request an hwspinlock for a managed device
 * @dev: the device to request an hwspinlock
 *
 * This function should be called by users of the hwspinlock device,
 * in order to dynamically assign them an unused hwspinlock.
 * Usually the user of this lock will then have to communicate the lock's id
 * to the remote core before it can be used for synchronization (to get the
 * id of a given hwlock, use hwspin_lock_get_id()).
 *
 * Should be called from a process context (might sleep)
 *
 * Returns: the address of the assigned hwspinlock, or %NULL on error
 */
struct hwspinlock *devm_hwspin_lock_request(struct device *dev)
{
	struct hwspinlock **ptr, *hwlock;

	ptr = devres_alloc(devm_hwspin_lock_release, sizeof(*ptr), GFP_KERNEL);
	if (!ptr)
		return NULL;

	hwlock = hwspin_lock_request();
	if (hwlock) {
		*ptr = hwlock;
		devres_add(dev, ptr);
	} else {
		devres_free(ptr);
	}

	return hwlock;
}
EXPORT_SYMBOL_GPL(devm_hwspin_lock_request);

/**
 * devm_hwspin_lock_request_specific() - request for a specific hwspinlock for
 *					 a managed device
 * @dev: the device to request the specific hwspinlock
 * @id: index of the specific hwspinlock that is requested
 *
 * This function should be called by users of the hwspinlock module,
 * in order to assign them a specific hwspinlock.
 * Usually early board code will be calling this function in order to
 * reserve specific hwspinlock ids for predefined purposes.
 *
 * Should be called from a process context (might sleep)
 *
 * Returns: the address of the assigned hwspinlock, or %NULL on error
 */
struct hwspinlock *devm_hwspin_lock_request_specific(struct device *dev,
						     unsigned int id)
{
	struct hwspinlock **ptr, *hwlock;

	ptr = devres_alloc(devm_hwspin_lock_release, sizeof(*ptr), GFP_KERNEL);
	if (!ptr)
		return NULL;

	hwlock = hwspin_lock_request_specific(id);
	if (hwlock) {
		*ptr = hwlock;
		devres_add(dev, ptr);
	} else {
		devres_free(ptr);
	}

	return hwlock;
}
EXPORT_SYMBOL_GPL(devm_hwspin_lock_request_specific);

MODULE_DESCRIPTION("Hardware spinlock interface");
MODULE_AUTHOR("Ohad Ben-Cohen <ohad@wizery.com>");