Greg Kroah-Hartman | b244131 | 2017-11-01 15:07:57 +0100 | [diff] [blame] | 1 | /* SPDX-License-Identifier: GPL-2.0 */ |
Mikulas Patocka | 62ac665 | 2012-09-26 07:46:43 +0200 | [diff] [blame] | 2 | #ifndef _LINUX_PERCPU_RWSEM_H |
| 3 | #define _LINUX_PERCPU_RWSEM_H |
| 4 | |
Oleg Nesterov | 9390ef0 | 2012-12-17 16:01:36 -0800 | [diff] [blame] | 5 | #include <linux/atomic.h> |
Oleg Nesterov | a1fd3e2 | 2012-12-17 16:01:32 -0800 | [diff] [blame] | 6 | #include <linux/rwsem.h> |
Mikulas Patocka | 62ac665 | 2012-09-26 07:46:43 +0200 | [diff] [blame] | 7 | #include <linux/percpu.h> |
Davidlohr Bueso | 52b9412 | 2017-01-11 07:22:26 -0800 | [diff] [blame] | 8 | #include <linux/rcuwait.h> |
Oleg Nesterov | 001dac6 | 2015-08-21 19:42:57 +0200 | [diff] [blame] | 9 | #include <linux/rcu_sync.h> |
Oleg Nesterov | 8ebe3473 | 2012-12-17 16:01:38 -0800 | [diff] [blame] | 10 | #include <linux/lockdep.h> |
Mikulas Patocka | 62ac665 | 2012-09-26 07:46:43 +0200 | [diff] [blame] | 11 | |
| 12 | struct percpu_rw_semaphore { |
Oleg Nesterov | 001dac6 | 2015-08-21 19:42:57 +0200 | [diff] [blame] | 13 | struct rcu_sync rss; |
Peter Zijlstra | 80127a3 | 2016-07-14 20:08:46 +0200 | [diff] [blame] | 14 | unsigned int __percpu *read_count; |
Davidlohr Bueso | 52b9412 | 2017-01-11 07:22:26 -0800 | [diff] [blame] | 15 | struct rw_semaphore rw_sem; /* slowpath */ |
| 16 | struct rcuwait writer; /* blocked writer */ |
Peter Zijlstra | 80127a3 | 2016-07-14 20:08:46 +0200 | [diff] [blame] | 17 | int readers_block; |
Peter Zijlstra | 1751060 | 2019-10-30 20:01:26 +0100 | [diff] [blame] | 18 | #ifdef CONFIG_DEBUG_LOCK_ALLOC |
| 19 | struct lockdep_map dep_map; |
| 20 | #endif |
Mikulas Patocka | 62ac665 | 2012-09-26 07:46:43 +0200 | [diff] [blame] | 21 | }; |
| 22 | |
Peter Zijlstra | 1751060 | 2019-10-30 20:01:26 +0100 | [diff] [blame] | 23 | #ifdef CONFIG_DEBUG_LOCK_ALLOC |
| 24 | #define __PERCPU_RWSEM_DEP_MAP_INIT(lockname) .dep_map = { .name = #lockname }, |
| 25 | #else |
| 26 | #define __PERCPU_RWSEM_DEP_MAP_INIT(lockname) |
| 27 | #endif |
| 28 | |
Oleg Nesterov | 3f2947b7 | 2019-04-23 18:32:41 +0200 | [diff] [blame] | 29 | #define __DEFINE_PERCPU_RWSEM(name, is_static) \ |
Peter Zijlstra | 11d9684 | 2015-06-22 14:16:31 +0200 | [diff] [blame] | 30 | static DEFINE_PER_CPU(unsigned int, __percpu_rwsem_rc_##name); \ |
Oleg Nesterov | 3f2947b7 | 2019-04-23 18:32:41 +0200 | [diff] [blame] | 31 | is_static struct percpu_rw_semaphore name = { \ |
Oleg Nesterov | 95bf33b | 2019-04-23 14:07:24 +0200 | [diff] [blame] | 32 | .rss = __RCU_SYNC_INITIALIZER(name.rss), \ |
Peter Zijlstra | 11d9684 | 2015-06-22 14:16:31 +0200 | [diff] [blame] | 33 | .read_count = &__percpu_rwsem_rc_##name, \ |
| 34 | .rw_sem = __RWSEM_INITIALIZER(name.rw_sem), \ |
Davidlohr Bueso | 52b9412 | 2017-01-11 07:22:26 -0800 | [diff] [blame] | 35 | .writer = __RCUWAIT_INITIALIZER(name.writer), \ |
Peter Zijlstra | 1751060 | 2019-10-30 20:01:26 +0100 | [diff] [blame] | 36 | __PERCPU_RWSEM_DEP_MAP_INIT(name) \ |
Peter Zijlstra | 11d9684 | 2015-06-22 14:16:31 +0200 | [diff] [blame] | 37 | } |
Peter Zijlstra | 1751060 | 2019-10-30 20:01:26 +0100 | [diff] [blame] | 38 | |
Oleg Nesterov | 3f2947b7 | 2019-04-23 18:32:41 +0200 | [diff] [blame] | 39 | #define DEFINE_PERCPU_RWSEM(name) \ |
| 40 | __DEFINE_PERCPU_RWSEM(name, /* not static */) |
| 41 | #define DEFINE_STATIC_PERCPU_RWSEM(name) \ |
| 42 | __DEFINE_PERCPU_RWSEM(name, static) |
Peter Zijlstra | 11d9684 | 2015-06-22 14:16:31 +0200 | [diff] [blame] | 43 | |
Peter Zijlstra | 206c98f | 2019-10-30 20:12:37 +0100 | [diff] [blame^] | 44 | extern bool __percpu_down_read(struct percpu_rw_semaphore *, bool); |
Peter Zijlstra | 80127a3 | 2016-07-14 20:08:46 +0200 | [diff] [blame] | 45 | extern void __percpu_up_read(struct percpu_rw_semaphore *); |
| 46 | |
Peter Zijlstra | 02e525b2 | 2019-02-21 15:38:40 +0100 | [diff] [blame] | 47 | static inline void percpu_down_read(struct percpu_rw_semaphore *sem) |
Peter Zijlstra | 80127a3 | 2016-07-14 20:08:46 +0200 | [diff] [blame] | 48 | { |
| 49 | might_sleep(); |
| 50 | |
Peter Zijlstra | 1751060 | 2019-10-30 20:01:26 +0100 | [diff] [blame] | 51 | rwsem_acquire_read(&sem->dep_map, 0, 0, _RET_IP_); |
Peter Zijlstra | 80127a3 | 2016-07-14 20:08:46 +0200 | [diff] [blame] | 52 | |
| 53 | preempt_disable(); |
| 54 | /* |
| 55 | * We are in an RCU-sched read-side critical section, so the writer |
| 56 | * cannot both change sem->state from readers_fast and start checking |
| 57 | * counters while we are here. So if we see !sem->state, we know that |
| 58 | * the writer won't be checking until we're past the preempt_enable() |
Paul E. McKenney | e3e7405 | 2018-11-07 13:53:34 -0800 | [diff] [blame] | 59 | * and that once the synchronize_rcu() is done, the writer will see |
Peter Zijlstra | 80127a3 | 2016-07-14 20:08:46 +0200 | [diff] [blame] | 60 | * anything we did within this RCU-sched read-size critical section. |
| 61 | */ |
| 62 | __this_cpu_inc(*sem->read_count); |
| 63 | if (unlikely(!rcu_sync_is_idle(&sem->rss))) |
| 64 | __percpu_down_read(sem, false); /* Unconditional memory barrier */ |
Peter Zijlstra | 80127a3 | 2016-07-14 20:08:46 +0200 | [diff] [blame] | 65 | /* |
Peter Zijlstra | 02e525b2 | 2019-02-21 15:38:40 +0100 | [diff] [blame] | 66 | * The preempt_enable() prevents the compiler from |
Peter Zijlstra | 80127a3 | 2016-07-14 20:08:46 +0200 | [diff] [blame] | 67 | * bleeding the critical section out. |
| 68 | */ |
Peter Zijlstra | 259d69b | 2015-11-23 15:23:55 +0100 | [diff] [blame] | 69 | preempt_enable(); |
| 70 | } |
| 71 | |
Peter Zijlstra | 206c98f | 2019-10-30 20:12:37 +0100 | [diff] [blame^] | 72 | static inline bool percpu_down_read_trylock(struct percpu_rw_semaphore *sem) |
Peter Zijlstra | 80127a3 | 2016-07-14 20:08:46 +0200 | [diff] [blame] | 73 | { |
Peter Zijlstra | 206c98f | 2019-10-30 20:12:37 +0100 | [diff] [blame^] | 74 | bool ret = true; |
Peter Zijlstra | 80127a3 | 2016-07-14 20:08:46 +0200 | [diff] [blame] | 75 | |
| 76 | preempt_disable(); |
| 77 | /* |
| 78 | * Same as in percpu_down_read(). |
| 79 | */ |
| 80 | __this_cpu_inc(*sem->read_count); |
| 81 | if (unlikely(!rcu_sync_is_idle(&sem->rss))) |
| 82 | ret = __percpu_down_read(sem, true); /* Unconditional memory barrier */ |
| 83 | preempt_enable(); |
| 84 | /* |
| 85 | * The barrier() from preempt_enable() prevents the compiler from |
| 86 | * bleeding the critical section out. |
| 87 | */ |
| 88 | |
| 89 | if (ret) |
Peter Zijlstra | 1751060 | 2019-10-30 20:01:26 +0100 | [diff] [blame] | 90 | rwsem_acquire_read(&sem->dep_map, 0, 1, _RET_IP_); |
Peter Zijlstra | 80127a3 | 2016-07-14 20:08:46 +0200 | [diff] [blame] | 91 | |
| 92 | return ret; |
| 93 | } |
| 94 | |
Peter Zijlstra | 02e525b2 | 2019-02-21 15:38:40 +0100 | [diff] [blame] | 95 | static inline void percpu_up_read(struct percpu_rw_semaphore *sem) |
Peter Zijlstra | 80127a3 | 2016-07-14 20:08:46 +0200 | [diff] [blame] | 96 | { |
Peter Zijlstra | 1751060 | 2019-10-30 20:01:26 +0100 | [diff] [blame] | 97 | rwsem_release(&sem->dep_map, _RET_IP_); |
| 98 | |
Peter Zijlstra | 02e525b2 | 2019-02-21 15:38:40 +0100 | [diff] [blame] | 99 | preempt_disable(); |
Peter Zijlstra | 80127a3 | 2016-07-14 20:08:46 +0200 | [diff] [blame] | 100 | /* |
| 101 | * Same as in percpu_down_read(). |
| 102 | */ |
| 103 | if (likely(rcu_sync_is_idle(&sem->rss))) |
| 104 | __this_cpu_dec(*sem->read_count); |
| 105 | else |
| 106 | __percpu_up_read(sem); /* Unconditional memory barrier */ |
| 107 | preempt_enable(); |
Peter Zijlstra | 80127a3 | 2016-07-14 20:08:46 +0200 | [diff] [blame] | 108 | } |
Mikulas Patocka | 5c1eabe | 2012-10-22 19:37:47 -0400 | [diff] [blame] | 109 | |
Oleg Nesterov | a1fd3e2 | 2012-12-17 16:01:32 -0800 | [diff] [blame] | 110 | extern void percpu_down_write(struct percpu_rw_semaphore *); |
| 111 | extern void percpu_up_write(struct percpu_rw_semaphore *); |
Mikulas Patocka | 62ac665 | 2012-09-26 07:46:43 +0200 | [diff] [blame] | 112 | |
Oleg Nesterov | 8ebe3473 | 2012-12-17 16:01:38 -0800 | [diff] [blame] | 113 | extern int __percpu_init_rwsem(struct percpu_rw_semaphore *, |
| 114 | const char *, struct lock_class_key *); |
Peter Zijlstra | 80127a3 | 2016-07-14 20:08:46 +0200 | [diff] [blame] | 115 | |
Oleg Nesterov | a1fd3e2 | 2012-12-17 16:01:32 -0800 | [diff] [blame] | 116 | extern void percpu_free_rwsem(struct percpu_rw_semaphore *); |
Mikulas Patocka | 62ac665 | 2012-09-26 07:46:43 +0200 | [diff] [blame] | 117 | |
Peter Zijlstra | 80127a3 | 2016-07-14 20:08:46 +0200 | [diff] [blame] | 118 | #define percpu_init_rwsem(sem) \ |
Oleg Nesterov | 8ebe3473 | 2012-12-17 16:01:38 -0800 | [diff] [blame] | 119 | ({ \ |
| 120 | static struct lock_class_key rwsem_key; \ |
Peter Zijlstra | 80127a3 | 2016-07-14 20:08:46 +0200 | [diff] [blame] | 121 | __percpu_init_rwsem(sem, #sem, &rwsem_key); \ |
Oleg Nesterov | 8ebe3473 | 2012-12-17 16:01:38 -0800 | [diff] [blame] | 122 | }) |
| 123 | |
Peter Zijlstra | 1751060 | 2019-10-30 20:01:26 +0100 | [diff] [blame] | 124 | #define percpu_rwsem_is_held(sem) lockdep_is_held(sem) |
| 125 | #define percpu_rwsem_assert_held(sem) lockdep_assert_held(sem) |
Peter Zijlstra | 11d9684 | 2015-06-22 14:16:31 +0200 | [diff] [blame] | 126 | |
Oleg Nesterov | 55cc15650 | 2015-07-21 20:26:44 +0200 | [diff] [blame] | 127 | static inline void percpu_rwsem_release(struct percpu_rw_semaphore *sem, |
| 128 | bool read, unsigned long ip) |
| 129 | { |
Peter Zijlstra | 1751060 | 2019-10-30 20:01:26 +0100 | [diff] [blame] | 130 | lock_release(&sem->dep_map, ip); |
Oleg Nesterov | 55cc15650 | 2015-07-21 20:26:44 +0200 | [diff] [blame] | 131 | #ifdef CONFIG_RWSEM_SPIN_ON_OWNER |
| 132 | if (!read) |
Waiman Long | 94a9717 | 2019-05-20 16:59:12 -0400 | [diff] [blame] | 133 | atomic_long_set(&sem->rw_sem.owner, RWSEM_OWNER_UNKNOWN); |
Oleg Nesterov | 55cc15650 | 2015-07-21 20:26:44 +0200 | [diff] [blame] | 134 | #endif |
| 135 | } |
| 136 | |
| 137 | static inline void percpu_rwsem_acquire(struct percpu_rw_semaphore *sem, |
| 138 | bool read, unsigned long ip) |
| 139 | { |
Peter Zijlstra | 1751060 | 2019-10-30 20:01:26 +0100 | [diff] [blame] | 140 | lock_acquire(&sem->dep_map, 0, 1, read, 1, NULL, ip); |
Waiman Long | 5a81764 | 2018-05-15 17:49:51 -0400 | [diff] [blame] | 141 | #ifdef CONFIG_RWSEM_SPIN_ON_OWNER |
| 142 | if (!read) |
Waiman Long | 94a9717 | 2019-05-20 16:59:12 -0400 | [diff] [blame] | 143 | atomic_long_set(&sem->rw_sem.owner, (long)current); |
Waiman Long | 5a81764 | 2018-05-15 17:49:51 -0400 | [diff] [blame] | 144 | #endif |
Oleg Nesterov | 55cc15650 | 2015-07-21 20:26:44 +0200 | [diff] [blame] | 145 | } |
| 146 | |
Mikulas Patocka | 62ac665 | 2012-09-26 07:46:43 +0200 | [diff] [blame] | 147 | #endif |