// SPDX-License-Identifier: GPL-2.0
/*
 * Hardware spinlock framework
 *
 * Copyright (C) 2010 Texas Instruments Incorporated - http://www.ti.com
 *
 * Contact: Ohad Ben-Cohen <ohad@wizery.com>
 */

#define pr_fmt(fmt)	"%s: " fmt, __func__

#include <linux/delay.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/types.h>
#include <linux/err.h>
#include <linux/jiffies.h>
#include <linux/radix-tree.h>
#include <linux/hwspinlock.h>
#include <linux/pm_runtime.h>
#include <linux/mutex.h>
#include <linux/of.h>

#include "hwspinlock_internal.h"

/* retry delay used in atomic context */
#define HWSPINLOCK_RETRY_DELAY_US	100

/* radix tree tags */
#define HWSPINLOCK_UNUSED	(0) /* tags an hwspinlock as unused */

/*
 * A radix tree is used to maintain the available hwspinlock instances.
 * The tree associates hwspinlock pointers with their integer key id,
 * and provides an easy-to-use API which keeps the hwspinlock core code
 * simple and easy to read.
 *
 * Radix trees are quick on lookups, and reasonably efficient in terms of
 * storage, especially with high density usages such as this framework
 * requires (a contiguous range of integer keys, beginning with zero, is
 * used as the IDs of the hwspinlock instances).
 *
 * The radix tree API supports tagging items in the tree, which this
 * framework uses to mark unused hwspinlock instances (see the
 * HWSPINLOCK_UNUSED tag above). As a result, the process of querying the
 * tree, looking for an unused hwspinlock instance, is reduced to a
 * single radix tree API call.
 */
static RADIX_TREE(hwspinlock_tree, GFP_KERNEL);

/*
 * Synchronization of access to the tree is achieved using this mutex,
 * as the radix-tree API requires that users provide all synchronisation.
 * A mutex is needed because we're using non-atomic radix tree allocations.
 */
static DEFINE_MUTEX(hwspinlock_tree_lock);
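
/*
 * Example (illustrative sketch, not part of the framework itself): with the
 * HWSPINLOCK_UNUSED tag in place, finding a free lock really is a single
 * radix tree call. This mirrors the lookup done in hwspin_lock_request()
 * below, and assumes hwspinlock_tree_lock is already held:
 *
 *	struct hwspinlock *hwlock;
 *	int ret;
 *
 *	ret = radix_tree_gang_lookup_tag(&hwspinlock_tree, (void **)&hwlock,
 *					 0, 1, HWSPINLOCK_UNUSED);
 *	if (ret == 0)
 *		hwlock = NULL;	(no unused instance was found)
 */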


/**
 * __hwspin_trylock() - attempt to lock a specific hwspinlock
 * @hwlock: an hwspinlock which we want to trylock
 * @mode: controls whether local interrupts are disabled or not
 * @flags: a pointer to where the caller's interrupt state will be saved
 * (if requested)
 *
 * This function attempts to lock an hwspinlock, and will immediately
 * fail if the hwspinlock is already taken.
 *
 * Caution: If the mode is HWLOCK_RAW, the caller must protect the routine
 * that takes the hardware lock with a mutex or spinlock. In some scenarios
 * the caller needs to perform time-consuming or sleepable operations under
 * the hardware lock, and therefore needs a sleepable lock (such as a mutex)
 * to protect those operations.
 *
 * If the mode is neither HWLOCK_IN_ATOMIC nor HWLOCK_RAW, upon a successful
 * return from this function, preemption (and possibly interrupts) is
 * disabled, so the caller must not sleep, and is advised to release the
 * hwspinlock as soon as possible. This is required in order to minimize
 * remote cores polling on the hardware interconnect.
 *
 * The user decides whether local interrupts are disabled or not, and if yes,
 * whether they want the previous state to be saved. It is up to the user
 * to choose the appropriate @mode of operation, exactly the same way users
 * should decide between spin_trylock, spin_trylock_irq and
 * spin_trylock_irqsave.
 *
 * Returns: %0 if we successfully locked the hwspinlock or -EBUSY if
 * the hwspinlock was already taken.
 *
 * This function will never sleep.
 */
int __hwspin_trylock(struct hwspinlock *hwlock, int mode, unsigned long *flags)
{
	int ret;

	if (WARN_ON(!hwlock || (!flags && mode == HWLOCK_IRQSTATE)))
		return -EINVAL;

	/*
	 * This spin_lock{_irq, _irqsave} serves three purposes:
	 *
	 * 1. Disable preemption, in order to minimize the period of time
	 *    in which the hwspinlock is taken. This is important in order
	 *    to minimize the possible polling on the hardware interconnect
	 *    by a remote user of this lock.
	 * 2. Make the hwspinlock SMP-safe (so we can take it from
	 *    additional contexts on the local host).
	 * 3. Ensure that in_atomic/might_sleep checks catch potential
	 *    problems with hwspinlock usage (e.g. scheduler checks like
	 *    'scheduling while atomic' etc.)
	 */
	switch (mode) {
	case HWLOCK_IRQSTATE:
		ret = spin_trylock_irqsave(&hwlock->lock, *flags);
		break;
	case HWLOCK_IRQ:
		ret = spin_trylock_irq(&hwlock->lock);
		break;
	case HWLOCK_RAW:
	case HWLOCK_IN_ATOMIC:
		ret = 1;
		break;
	default:
		ret = spin_trylock(&hwlock->lock);
		break;
	}

	/* is lock already taken by another context on the local cpu ? */
	if (!ret)
		return -EBUSY;

	/* try to take the hwspinlock device */
	ret = hwlock->bank->ops->trylock(hwlock);

	/* if hwlock is already taken, undo spin_trylock_* and exit */
	if (!ret) {
		switch (mode) {
		case HWLOCK_IRQSTATE:
			spin_unlock_irqrestore(&hwlock->lock, *flags);
			break;
		case HWLOCK_IRQ:
			spin_unlock_irq(&hwlock->lock);
			break;
		case HWLOCK_RAW:
		case HWLOCK_IN_ATOMIC:
			/* Nothing to do */
			break;
		default:
			spin_unlock(&hwlock->lock);
			break;
		}

		return -EBUSY;
	}

	/*
	 * We can be sure the other core's memory operations
	 * are observable to us only _after_ we successfully take
	 * the hwspinlock, and we must make sure that subsequent memory
	 * operations (both reads and writes) will not be reordered before
	 * we actually took the hwspinlock.
	 *
	 * Note: the implicit memory barrier of the spinlock above is too
	 * early, so we need this additional explicit memory barrier.
	 */
	mb();

	return 0;
}
EXPORT_SYMBOL_GPL(__hwspin_trylock);
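
/*
 * Example (illustrative sketch): callers normally use the static inline
 * wrappers declared in <linux/hwspinlock.h> rather than __hwspin_trylock()
 * directly. Assuming the hwspin_trylock_irqsave()/hwspin_unlock_irqrestore()
 * pair from that header, a typical attempt looks like:
 *
 *	unsigned long flags;
 *	int ret;
 *
 *	ret = hwspin_trylock_irqsave(hwlock, &flags);
 *	if (ret)
 *		return ret;	(-EBUSY: somebody else holds the lock)
 *
 *	... short, non-sleeping critical section ...
 *
 *	hwspin_unlock_irqrestore(hwlock, &flags);
 */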

/**
 * __hwspin_lock_timeout() - lock an hwspinlock with timeout limit
 * @hwlock: the hwspinlock to be locked
 * @to: timeout value in msecs
 * @mode: mode which controls whether local interrupts are disabled or not
 * @flags: a pointer to where the caller's interrupt state will be saved
 * (if requested)
 *
 * This function locks the given @hwlock. If the @hwlock
 * is already taken, the function will busy loop waiting for it to
 * be released, but give up after @to msecs have elapsed.
 *
 * Caution: If the mode is HWLOCK_RAW, the caller must protect the routine
 * that takes the hardware lock with a mutex or spinlock. In some scenarios
 * the caller needs to perform time-consuming or sleepable operations under
 * the hardware lock, and therefore needs a sleepable lock (such as a mutex)
 * to protect those operations.
 *
 * If the mode is HWLOCK_IN_ATOMIC (called from an atomic context) the
 * timeout is handled with busy-waiting delays, hence it shall not exceed
 * a few msecs.
 *
 * If the mode is neither HWLOCK_IN_ATOMIC nor HWLOCK_RAW, upon a successful
 * return from this function, preemption (and possibly interrupts) is
 * disabled, so the caller must not sleep, and is advised to release the
 * hwspinlock as soon as possible. This is required in order to minimize
 * remote cores polling on the hardware interconnect.
 *
 * The user decides whether local interrupts are disabled or not, and if yes,
 * whether they want the previous state to be saved. It is up to the user
 * to choose the appropriate @mode of operation, exactly the same way users
 * should decide between spin_lock, spin_lock_irq and spin_lock_irqsave.
 *
 * Returns: %0 when the @hwlock was successfully taken, and an appropriate
 * error code otherwise (most notably -ETIMEDOUT if the @hwlock is still
 * busy after @to msecs).
 *
 * The function will never sleep.
 */
int __hwspin_lock_timeout(struct hwspinlock *hwlock, unsigned int to,
			  int mode, unsigned long *flags)
{
	int ret;
	unsigned long expire, atomic_delay = 0;

	expire = msecs_to_jiffies(to) + jiffies;

	for (;;) {
		/* Try to take the hwspinlock */
		ret = __hwspin_trylock(hwlock, mode, flags);
		if (ret != -EBUSY)
			break;

		/*
		 * The lock is already taken, let's check if the user wants
		 * us to try again
		 */
		if (mode == HWLOCK_IN_ATOMIC) {
			udelay(HWSPINLOCK_RETRY_DELAY_US);
			atomic_delay += HWSPINLOCK_RETRY_DELAY_US;
			if (atomic_delay > to * 1000)
				return -ETIMEDOUT;
		} else {
			if (time_is_before_eq_jiffies(expire))
				return -ETIMEDOUT;
		}

		/*
		 * Allow platform-specific relax handlers to prevent
		 * hogging the interconnect (no sleeping, though)
		 */
		if (hwlock->bank->ops->relax)
			hwlock->bank->ops->relax(hwlock);
	}

	return ret;
}
EXPORT_SYMBOL_GPL(__hwspin_lock_timeout);
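
/*
 * Example (illustrative sketch): assuming the hwspin_lock_timeout_irq()
 * wrapper from <linux/hwspinlock.h>, a caller willing to busy-wait for up
 * to 10 msecs with local interrupts disabled would do:
 *
 *	ret = hwspin_lock_timeout_irq(hwlock, 10);
 *	if (ret)
 *		return ret;	(-ETIMEDOUT if still busy after 10 msecs)
 *
 *	... short, non-sleeping critical section ...
 *
 *	hwspin_unlock_irq(hwlock);
 *
 * From atomic context, the corresponding HWLOCK_IN_ATOMIC wrapper in the
 * same header (hwspin_lock_timeout_in_atomic()) should be used instead,
 * with the timeout kept to a few msecs as noted above.
 */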

/**
 * __hwspin_unlock() - unlock a specific hwspinlock
 * @hwlock: a previously-acquired hwspinlock which we want to unlock
 * @mode: controls whether local interrupts need to be restored or not
 * @flags: previous caller's interrupt state to restore (if requested)
 *
 * This function will unlock a specific hwspinlock, enable preemption and
 * (possibly) enable interrupts or restore their previous state.
 * @hwlock must be already locked before calling this function: it is a bug
 * to call unlock on a @hwlock that is already unlocked.
 *
 * The user decides whether local interrupts should be enabled or not, and
 * if yes, whether they want the previous state to be restored. It is up
 * to the user to choose the appropriate @mode of operation, exactly the
 * same way users decide between spin_unlock, spin_unlock_irq and
 * spin_unlock_irqrestore.
 *
 * The function will never sleep.
 */
void __hwspin_unlock(struct hwspinlock *hwlock, int mode, unsigned long *flags)
{
	if (WARN_ON(!hwlock || (!flags && mode == HWLOCK_IRQSTATE)))
		return;

	/*
	 * We must make sure that memory operations (both reads and writes),
	 * done before unlocking the hwspinlock, will not be reordered
	 * after the lock is released.
	 *
	 * That's the purpose of this explicit memory barrier.
	 *
	 * Note: the memory barrier induced by the spin_unlock below is too
	 * late; the other core is going to access memory soon after it will
	 * take the hwspinlock, and by then we want to be sure our memory
	 * operations are already observable.
	 */
	mb();

	hwlock->bank->ops->unlock(hwlock);

	/* Undo the spin_trylock{_irq, _irqsave} called while locking */
	switch (mode) {
	case HWLOCK_IRQSTATE:
		spin_unlock_irqrestore(&hwlock->lock, *flags);
		break;
	case HWLOCK_IRQ:
		spin_unlock_irq(&hwlock->lock);
		break;
	case HWLOCK_RAW:
	case HWLOCK_IN_ATOMIC:
		/* Nothing to do */
		break;
	default:
		spin_unlock(&hwlock->lock);
		break;
	}
}
EXPORT_SYMBOL_GPL(__hwspin_unlock);

/**
 * of_hwspin_lock_simple_xlate() - translate hwlock_spec to return a lock id
 * @hwlock_spec: hwlock specifier as found in the device tree
 *
 * This is a simple translation function, suitable for hwspinlock platform
 * drivers that only have a lock specifier length of 1.
 *
 * Returns: a relative index of the lock within a specified bank on success,
 * or -EINVAL on invalid specifier cell count.
 */
static inline int
of_hwspin_lock_simple_xlate(const struct of_phandle_args *hwlock_spec)
{
	if (WARN_ON(hwlock_spec->args_count != 1))
		return -EINVAL;

	return hwlock_spec->args[0];
}

/**
 * of_hwspin_lock_get_id() - get lock id for an OF phandle-based specific lock
 * @np: device node from which to request the specific hwlock
 * @index: index of the hwlock in the list of values
 *
 * This function provides a means for DT users of the hwspinlock module to
 * get the global lock id of a specific hwspinlock using the phandle of the
 * hwspinlock device, so that it can be requested using the normal
 * hwspin_lock_request_specific() API.
 *
 * Returns: the global lock id number on success, -EPROBE_DEFER if the
 * hwspinlock device is not yet registered, -EINVAL on invalid args
 * specifier value or an appropriate error as returned from the OF parsing
 * of the DT client node.
 */
int of_hwspin_lock_get_id(struct device_node *np, int index)
{
	struct of_phandle_args args;
	struct hwspinlock *hwlock;
	struct radix_tree_iter iter;
	void **slot;
	int id;
	int ret;

	ret = of_parse_phandle_with_args(np, "hwlocks", "#hwlock-cells", index,
					 &args);
	if (ret)
		return ret;

	if (!of_device_is_available(args.np)) {
		ret = -ENOENT;
		goto out;
	}

	/* Find the hwspinlock device: we need its base_id */
	ret = -EPROBE_DEFER;
	rcu_read_lock();
	radix_tree_for_each_slot(slot, &hwspinlock_tree, &iter, 0) {
		hwlock = radix_tree_deref_slot(slot);
		if (unlikely(!hwlock))
			continue;
		if (radix_tree_deref_retry(hwlock)) {
			slot = radix_tree_iter_retry(&iter);
			continue;
		}

		if (device_match_of_node(hwlock->bank->dev, args.np)) {
			ret = 0;
			break;
		}
	}
	rcu_read_unlock();
	if (ret < 0)
		goto out;

	id = of_hwspin_lock_simple_xlate(&args);
	if (id < 0 || id >= hwlock->bank->num_locks) {
		ret = -EINVAL;
		goto out;
	}
	id += hwlock->bank->base_id;

out:
	of_node_put(args.np);
	return ret ? ret : id;
}
EXPORT_SYMBOL_GPL(of_hwspin_lock_get_id);
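
/*
 * Example (illustrative sketch): a DT consumer driver would typically
 * translate its "hwlocks" phandle into a global id and then claim the lock,
 * propagating -EPROBE_DEFER when the provider is not registered yet:
 *
 *	int id;
 *
 *	id = of_hwspin_lock_get_id(dev->of_node, 0);
 *	if (id < 0)
 *		return id;	(may be -EPROBE_DEFER)
 *
 *	hwlock = hwspin_lock_request_specific(id);
 */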

/**
 * of_hwspin_lock_get_id_byname() - get lock id for a specified hwlock name
 * @np: device node from which to request the specific hwlock
 * @name: hwlock name
 *
 * This function provides a means for DT users of the hwspinlock module to
 * get the global lock id of a specific hwspinlock using the specified name
 * of the hwspinlock device, so that it can be requested using the normal
 * hwspin_lock_request_specific() API.
 *
 * Returns: the global lock id number on success, -EPROBE_DEFER if the
 * hwspinlock device is not yet registered, -EINVAL on invalid args
 * specifier value or an appropriate error as returned from the OF parsing
 * of the DT client node.
 */
int of_hwspin_lock_get_id_byname(struct device_node *np, const char *name)
{
	int index;

	if (!name)
		return -EINVAL;

	index = of_property_match_string(np, "hwlock-names", name);
	if (index < 0)
		return index;

	return of_hwspin_lock_get_id(np, index);
}
EXPORT_SYMBOL_GPL(of_hwspin_lock_get_id_byname);
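
/*
 * Example (illustrative sketch): the byname lookup assumes DT nodes along
 * these lines, with "hwlock-names" naming each entry of "hwlocks" (the node
 * names and cell values below are hypothetical):
 *
 *	hwlock1: hwspinlock {
 *		#hwlock-cells = <1>;
 *	};
 *
 *	consumer {
 *		hwlocks = <&hwlock1 2>, <&hwlock1 3>;
 *		hwlock-names = "rx", "tx";
 *	};
 *
 * of_hwspin_lock_get_id_byname(np, "tx") then resolves to the global id of
 * lock 3 within that bank.
 */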

static int hwspin_lock_register_single(struct hwspinlock *hwlock, int id)
{
	struct hwspinlock *tmp;
	int ret;

	mutex_lock(&hwspinlock_tree_lock);

	ret = radix_tree_insert(&hwspinlock_tree, id, hwlock);
	if (ret) {
		if (ret == -EEXIST)
			pr_err("hwspinlock id %d already exists!\n", id);
		goto out;
	}

	/* mark this hwspinlock as available */
	tmp = radix_tree_tag_set(&hwspinlock_tree, id, HWSPINLOCK_UNUSED);

	/* self-sanity check which should never fail */
	WARN_ON(tmp != hwlock);

out:
	mutex_unlock(&hwspinlock_tree_lock);
	return ret;
}

static struct hwspinlock *hwspin_lock_unregister_single(unsigned int id)
{
	struct hwspinlock *hwlock = NULL;
	int ret;

	mutex_lock(&hwspinlock_tree_lock);

	/* make sure the hwspinlock is not in use (tag is set) */
	ret = radix_tree_tag_get(&hwspinlock_tree, id, HWSPINLOCK_UNUSED);
	if (ret == 0) {
		pr_err("hwspinlock %d still in use (or not present)\n", id);
		goto out;
	}

	hwlock = radix_tree_delete(&hwspinlock_tree, id);
	if (!hwlock) {
		pr_err("failed to delete hwspinlock %d\n", id);
		goto out;
	}

out:
	mutex_unlock(&hwspinlock_tree_lock);
	return hwlock;
}

/**
 * hwspin_lock_register() - register a new hw spinlock device
 * @bank: the hwspinlock device, which usually provides numerous hw locks
 * @dev: the backing device
 * @ops: hwspinlock handlers for this device
 * @base_id: id of the first hardware spinlock in this bank
 * @num_locks: number of hwspinlocks provided by this device
 *
 * This function should be called from the underlying platform-specific
 * implementation, to register a new hwspinlock device instance.
 *
 * Should be called from a process context (might sleep)
 *
 * Returns: %0 on success, or an appropriate error code on failure
 */
int hwspin_lock_register(struct hwspinlock_device *bank, struct device *dev,
		const struct hwspinlock_ops *ops, int base_id, int num_locks)
{
	struct hwspinlock *hwlock;
	int ret = 0, i;

	if (!bank || !ops || !dev || !num_locks || !ops->trylock ||
	    !ops->unlock) {
		pr_err("invalid parameters\n");
		return -EINVAL;
	}

	bank->dev = dev;
	bank->ops = ops;
	bank->base_id = base_id;
	bank->num_locks = num_locks;

	for (i = 0; i < num_locks; i++) {
		hwlock = &bank->lock[i];

		spin_lock_init(&hwlock->lock);
		hwlock->bank = bank;

		ret = hwspin_lock_register_single(hwlock, base_id + i);
		if (ret)
			goto reg_failed;
	}

	return 0;

reg_failed:
	while (--i >= 0)
		hwspin_lock_unregister_single(base_id + i);
	return ret;
}
EXPORT_SYMBOL_GPL(hwspin_lock_register);
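
/*
 * Example (illustrative sketch): a hypothetical platform driver registers a
 * bank like this (the my_* names and MY_NUM_LOCKS are placeholders; struct
 * hwspinlock_device embeds a flexible array of struct hwspinlock, hence the
 * struct_size() allocation):
 *
 *	static const struct hwspinlock_ops my_ops = {
 *		.trylock = my_trylock,
 *		.unlock = my_unlock,
 *		.relax = my_relax,	(optional busy-loop relax hook)
 *	};
 *
 *	static int my_probe(struct platform_device *pdev)
 *	{
 *		struct hwspinlock_device *bank;
 *
 *		bank = devm_kzalloc(&pdev->dev,
 *				    struct_size(bank, lock, MY_NUM_LOCKS),
 *				    GFP_KERNEL);
 *		if (!bank)
 *			return -ENOMEM;
 *
 *		return devm_hwspin_lock_register(&pdev->dev, bank, &my_ops,
 *						 0, MY_NUM_LOCKS);
 *	}
 *
 * The devm_ variant (below) takes care of unregistration on driver detach.
 */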

/**
 * hwspin_lock_unregister() - unregister an hw spinlock device
 * @bank: the hwspinlock device, which usually provides numerous hw locks
 *
 * This function should be called from the underlying platform-specific
 * implementation, to unregister an existing (and unused) hwspinlock.
 *
 * Should be called from a process context (might sleep)
 *
 * Returns: %0 on success, or an appropriate error code on failure
 */
int hwspin_lock_unregister(struct hwspinlock_device *bank)
{
	struct hwspinlock *hwlock, *tmp;
	int i;

	for (i = 0; i < bank->num_locks; i++) {
		hwlock = &bank->lock[i];

		tmp = hwspin_lock_unregister_single(bank->base_id + i);
		if (!tmp)
			return -EBUSY;

		/* self-sanity check that should never fail */
		WARN_ON(tmp != hwlock);
	}

	return 0;
}
EXPORT_SYMBOL_GPL(hwspin_lock_unregister);

static void devm_hwspin_lock_unreg(struct device *dev, void *res)
{
	hwspin_lock_unregister(*(struct hwspinlock_device **)res);
}

static int devm_hwspin_lock_device_match(struct device *dev, void *res,
					 void *data)
{
	struct hwspinlock_device **bank = res;

	if (WARN_ON(!bank || !*bank))
		return 0;

	return *bank == data;
}

/**
 * devm_hwspin_lock_unregister() - unregister an hw spinlock device for
 *				   a managed device
 * @dev: the backing device
 * @bank: the hwspinlock device, which usually provides numerous hw locks
 *
 * This function should be called from the underlying platform-specific
 * implementation, to unregister an existing (and unused) hwspinlock.
 *
 * Should be called from a process context (might sleep)
 *
 * Returns: %0 on success, or an appropriate error code on failure
 */
int devm_hwspin_lock_unregister(struct device *dev,
				struct hwspinlock_device *bank)
{
	int ret;

	ret = devres_release(dev, devm_hwspin_lock_unreg,
			     devm_hwspin_lock_device_match, bank);
	WARN_ON(ret);

	return ret;
}
EXPORT_SYMBOL_GPL(devm_hwspin_lock_unregister);

/**
 * devm_hwspin_lock_register() - register a new hw spinlock device for
 *				 a managed device
 * @dev: the backing device
 * @bank: the hwspinlock device, which usually provides numerous hw locks
 * @ops: hwspinlock handlers for this device
 * @base_id: id of the first hardware spinlock in this bank
 * @num_locks: number of hwspinlocks provided by this device
 *
 * This function should be called from the underlying platform-specific
 * implementation, to register a new hwspinlock device instance.
 *
 * Should be called from a process context (might sleep)
 *
 * Returns: %0 on success, or an appropriate error code on failure
 */
int devm_hwspin_lock_register(struct device *dev,
			      struct hwspinlock_device *bank,
			      const struct hwspinlock_ops *ops,
			      int base_id, int num_locks)
{
	struct hwspinlock_device **ptr;
	int ret;

	ptr = devres_alloc(devm_hwspin_lock_unreg, sizeof(*ptr), GFP_KERNEL);
	if (!ptr)
		return -ENOMEM;

	ret = hwspin_lock_register(bank, dev, ops, base_id, num_locks);
	if (!ret) {
		*ptr = bank;
		devres_add(dev, ptr);
	} else {
		devres_free(ptr);
	}

	return ret;
}
EXPORT_SYMBOL_GPL(devm_hwspin_lock_register);

/**
 * __hwspin_lock_request() - tag an hwspinlock as used and power it up
 * @hwlock: the target hwspinlock
 *
 * This is an internal function that prepares an hwspinlock instance
 * before it is given to the user. The function assumes that
 * hwspinlock_tree_lock is taken.
 *
 * Returns: %0 or positive to indicate success, and a negative value to
 * indicate an error (with the appropriate error code)
 */
static int __hwspin_lock_request(struct hwspinlock *hwlock)
{
	struct device *dev = hwlock->bank->dev;
	struct hwspinlock *tmp;
	int ret;

	/* prevent underlying implementation from being removed */
	if (!try_module_get(dev->driver->owner)) {
		dev_err(dev, "%s: can't get owner\n", __func__);
		return -EINVAL;
	}

	/* notify PM core that power is now needed */
	ret = pm_runtime_get_sync(dev);
	if (ret < 0 && ret != -EACCES) {
		dev_err(dev, "%s: can't power on device\n", __func__);
		pm_runtime_put_noidle(dev);
		module_put(dev->driver->owner);
		return ret;
	}

	ret = 0;

	/* mark hwspinlock as used, should not fail */
	tmp = radix_tree_tag_clear(&hwspinlock_tree, hwlock_to_id(hwlock),
				   HWSPINLOCK_UNUSED);

	/* self-sanity check that should never fail */
	WARN_ON(tmp != hwlock);

	return ret;
}

/**
 * hwspin_lock_get_id() - retrieve id number of a given hwspinlock
 * @hwlock: a valid hwspinlock instance
 *
 * Returns: the id number of a given @hwlock, or -EINVAL if @hwlock is invalid.
 */
int hwspin_lock_get_id(struct hwspinlock *hwlock)
{
	if (!hwlock) {
		pr_err("invalid hwlock\n");
		return -EINVAL;
	}

	return hwlock_to_id(hwlock);
}
EXPORT_SYMBOL_GPL(hwspin_lock_get_id);

/**
 * hwspin_lock_request() - request an hwspinlock
 *
 * This function should be called by users of the hwspinlock device,
 * in order to dynamically assign them an unused hwspinlock.
 * Usually the user of this lock will then have to communicate the lock's id
 * to the remote core before it can be used for synchronization (to get the
 * id of a given hwlock, use hwspin_lock_get_id()).
 *
 * Should be called from a process context (might sleep)
 *
 * Returns: the address of the assigned hwspinlock, or %NULL on error
 */
struct hwspinlock *hwspin_lock_request(void)
{
	struct hwspinlock *hwlock;
	int ret;

	mutex_lock(&hwspinlock_tree_lock);

	/* look for an unused lock */
	ret = radix_tree_gang_lookup_tag(&hwspinlock_tree, (void **)&hwlock,
					 0, 1, HWSPINLOCK_UNUSED);
	if (ret == 0) {
		pr_warn("a free hwspinlock is not available\n");
		hwlock = NULL;
		goto out;
	}

	/* sanity check that should never fail */
	WARN_ON(ret > 1);

	/* mark as used and power up */
	ret = __hwspin_lock_request(hwlock);
	if (ret < 0)
		hwlock = NULL;

out:
	mutex_unlock(&hwspinlock_tree_lock);
	return hwlock;
}
EXPORT_SYMBOL_GPL(hwspin_lock_request);
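
/*
 * Example (illustrative sketch): dynamic allocation plus sharing the id with
 * a remote core (the transport used to publish the id is up to the caller):
 *
 *	struct hwspinlock *hwlock;
 *	int id;
 *
 *	hwlock = hwspin_lock_request();
 *	if (!hwlock)
 *		return -EBUSY;
 *
 *	id = hwspin_lock_get_id(hwlock);
 *	... communicate id to the remote core, e.g. via rpmsg ...
 */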

/**
 * hwspin_lock_request_specific() - request for a specific hwspinlock
 * @id: index of the specific hwspinlock that is requested
 *
 * This function should be called by users of the hwspinlock module,
 * in order to assign them a specific hwspinlock.
 * Usually early board code will be calling this function in order to
 * reserve specific hwspinlock ids for predefined purposes.
 *
 * Should be called from a process context (might sleep)
 *
 * Returns: the address of the assigned hwspinlock, or %NULL on error
 */
struct hwspinlock *hwspin_lock_request_specific(unsigned int id)
{
	struct hwspinlock *hwlock;
	int ret;

	mutex_lock(&hwspinlock_tree_lock);

	/* make sure this hwspinlock exists */
	hwlock = radix_tree_lookup(&hwspinlock_tree, id);
	if (!hwlock) {
		pr_warn("hwspinlock %u does not exist\n", id);
		goto out;
	}

	/* sanity check (this shouldn't happen) */
	WARN_ON(hwlock_to_id(hwlock) != id);

	/* make sure this hwspinlock is unused */
	ret = radix_tree_tag_get(&hwspinlock_tree, id, HWSPINLOCK_UNUSED);
	if (ret == 0) {
		pr_warn("hwspinlock %u is already in use\n", id);
		hwlock = NULL;
		goto out;
	}

	/* mark as used and power up */
	ret = __hwspin_lock_request(hwlock);
	if (ret < 0)
		hwlock = NULL;

out:
	mutex_unlock(&hwspinlock_tree_lock);
	return hwlock;
}
EXPORT_SYMBOL_GPL(hwspin_lock_request_specific);

/**
 * hwspin_lock_free() - free a specific hwspinlock
 * @hwlock: the specific hwspinlock to free
 *
 * This function marks @hwlock as free again.
 * Should only be called with an @hwlock that was retrieved from
 * an earlier call to hwspin_lock_request{_specific}.
 *
 * Should be called from a process context (might sleep)
 *
 * Returns: %0 on success, or an appropriate error code on failure
 */
int hwspin_lock_free(struct hwspinlock *hwlock)
{
	struct device *dev;
	struct hwspinlock *tmp;
	int ret;

	if (!hwlock) {
		pr_err("invalid hwlock\n");
		return -EINVAL;
	}

	dev = hwlock->bank->dev;
	mutex_lock(&hwspinlock_tree_lock);

	/* make sure the hwspinlock is used */
	ret = radix_tree_tag_get(&hwspinlock_tree, hwlock_to_id(hwlock),
				 HWSPINLOCK_UNUSED);
	if (ret == 1) {
		dev_err(dev, "%s: hwlock is already free\n", __func__);
		dump_stack();
		ret = -EINVAL;
		goto out;
	}

	/* notify the underlying device that power is not needed */
	pm_runtime_put(dev);

	/* mark this hwspinlock as available */
	tmp = radix_tree_tag_set(&hwspinlock_tree, hwlock_to_id(hwlock),
				 HWSPINLOCK_UNUSED);

	/* sanity check (this shouldn't happen) */
	WARN_ON(tmp != hwlock);

	module_put(dev->driver->owner);

out:
	mutex_unlock(&hwspinlock_tree_lock);
	return ret;
}
EXPORT_SYMBOL_GPL(hwspin_lock_free);

static int devm_hwspin_lock_match(struct device *dev, void *res, void *data)
{
	struct hwspinlock **hwlock = res;

	if (WARN_ON(!hwlock || !*hwlock))
		return 0;

	return *hwlock == data;
}

static void devm_hwspin_lock_release(struct device *dev, void *res)
{
	hwspin_lock_free(*(struct hwspinlock **)res);
}

/**
 * devm_hwspin_lock_free() - free a specific hwspinlock for a managed device
 * @dev: the device to free the specific hwspinlock
 * @hwlock: the specific hwspinlock to free
 *
 * This function marks @hwlock as free again.
 * Should only be called with an @hwlock that was retrieved from
 * an earlier call to hwspin_lock_request{_specific}.
 *
 * Should be called from a process context (might sleep)
 *
 * Returns: %0 on success, or an appropriate error code on failure
 */
int devm_hwspin_lock_free(struct device *dev, struct hwspinlock *hwlock)
{
	int ret;

	ret = devres_release(dev, devm_hwspin_lock_release,
			     devm_hwspin_lock_match, hwlock);
	WARN_ON(ret);

	return ret;
}
EXPORT_SYMBOL_GPL(devm_hwspin_lock_free);

/**
 * devm_hwspin_lock_request() - request an hwspinlock for a managed device
 * @dev: the device to request an hwspinlock
 *
 * This function should be called by users of the hwspinlock device,
 * in order to dynamically assign them an unused hwspinlock.
 * Usually the user of this lock will then have to communicate the lock's id
 * to the remote core before it can be used for synchronization (to get the
 * id of a given hwlock, use hwspin_lock_get_id()).
 *
 * Should be called from a process context (might sleep)
 *
 * Returns: the address of the assigned hwspinlock, or %NULL on error
 */
struct hwspinlock *devm_hwspin_lock_request(struct device *dev)
{
	struct hwspinlock **ptr, *hwlock;

	ptr = devres_alloc(devm_hwspin_lock_release, sizeof(*ptr), GFP_KERNEL);
	if (!ptr)
		return NULL;

	hwlock = hwspin_lock_request();
	if (hwlock) {
		*ptr = hwlock;
		devres_add(dev, ptr);
	} else {
		devres_free(ptr);
	}

	return hwlock;
}
EXPORT_SYMBOL_GPL(devm_hwspin_lock_request);

/**
 * devm_hwspin_lock_request_specific() - request for a specific hwspinlock for
 *					 a managed device
 * @dev: the device to request the specific hwspinlock
 * @id: index of the specific hwspinlock that is requested
 *
 * This function should be called by users of the hwspinlock module,
 * in order to assign them a specific hwspinlock.
 * Usually early board code will be calling this function in order to
 * reserve specific hwspinlock ids for predefined purposes.
 *
 * Should be called from a process context (might sleep)
 *
 * Returns: the address of the assigned hwspinlock, or %NULL on error
 */
struct hwspinlock *devm_hwspin_lock_request_specific(struct device *dev,
						     unsigned int id)
{
	struct hwspinlock **ptr, *hwlock;

	ptr = devres_alloc(devm_hwspin_lock_release, sizeof(*ptr), GFP_KERNEL);
	if (!ptr)
		return NULL;

	hwlock = hwspin_lock_request_specific(id);
	if (hwlock) {
		*ptr = hwlock;
		devres_add(dev, ptr);
	} else {
		devres_free(ptr);
	}

	return hwlock;
}
EXPORT_SYMBOL_GPL(devm_hwspin_lock_request_specific);
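
/*
 * Example (illustrative sketch): a managed consumer that resolves a named DT
 * hwlock and claims it for the lifetime of the device (my_client_probe is a
 * hypothetical probe routine):
 *
 *	static int my_client_probe(struct platform_device *pdev)
 *	{
 *		struct hwspinlock *hwlock;
 *		int id;
 *
 *		id = of_hwspin_lock_get_id_byname(pdev->dev.of_node, "tx");
 *		if (id < 0)
 *			return id;	(may be -EPROBE_DEFER)
 *
 *		hwlock = devm_hwspin_lock_request_specific(&pdev->dev, id);
 *		if (!hwlock)
 *			return -EBUSY;
 *
 *		... the lock is freed automatically on driver detach ...
 *		return 0;
 *	}
 */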

MODULE_DESCRIPTION("Hardware spinlock interface");
MODULE_AUTHOR("Ohad Ben-Cohen <ohad@wizery.com>");