/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_PERCPU_RWSEM_H
#define _LINUX_PERCPU_RWSEM_H

#include <linux/atomic.h>
#include <linux/rwsem.h>
#include <linux/percpu.h>
#include <linux/rcuwait.h>
#include <linux/rcu_sync.h>
#include <linux/lockdep.h>

struct percpu_rw_semaphore {
	struct rcu_sync		rss;
	unsigned int __percpu	*read_count;
	struct rw_semaphore	rw_sem;		/* slowpath */
	struct rcuwait		writer;		/* blocked writer */
	int			readers_block;
#ifdef CONFIG_DEBUG_LOCK_ALLOC
	struct lockdep_map	dep_map;
#endif
};

#ifdef CONFIG_DEBUG_LOCK_ALLOC
#define __PERCPU_RWSEM_DEP_MAP_INIT(lockname)	.dep_map = { .name = #lockname },
#else
#define __PERCPU_RWSEM_DEP_MAP_INIT(lockname)
#endif

#define __DEFINE_PERCPU_RWSEM(name, is_static)				\
static DEFINE_PER_CPU(unsigned int, __percpu_rwsem_rc_##name);		\
is_static struct percpu_rw_semaphore name = {				\
	.rss = __RCU_SYNC_INITIALIZER(name.rss),			\
	.read_count = &__percpu_rwsem_rc_##name,			\
	.rw_sem = __RWSEM_INITIALIZER(name.rw_sem),			\
	.writer = __RCUWAIT_INITIALIZER(name.writer),			\
	__PERCPU_RWSEM_DEP_MAP_INIT(name)				\
}

#define DEFINE_PERCPU_RWSEM(name)		\
	__DEFINE_PERCPU_RWSEM(name, /* not static */)
#define DEFINE_STATIC_PERCPU_RWSEM(name)	\
	__DEFINE_PERCPU_RWSEM(name, static)
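
/*
 * Example usage (an illustrative sketch, not taken from this header): a
 * statically defined percpu rw_semaphore protecting a hypothetical
 * read-mostly "mode" setting. Readers pay only a per-cpu increment on
 * the fast path; percpu_down_write() may sleep and forces all readers
 * onto the slowpath rwsem until the matching percpu_up_write().
 *
 *	static DEFINE_STATIC_PERCPU_RWSEM(mode_rwsem);
 *	static int mode;
 *
 *	int read_mode(void)
 *	{
 *		int m;
 *
 *		percpu_down_read(&mode_rwsem);
 *		m = mode;
 *		percpu_up_read(&mode_rwsem);
 *		return m;
 *	}
 *
 *	void set_mode(int m)
 *	{
 *		percpu_down_write(&mode_rwsem);
 *		mode = m;
 *		percpu_up_write(&mode_rwsem);
 *	}
 */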

extern int __percpu_down_read(struct percpu_rw_semaphore *, int);
extern void __percpu_up_read(struct percpu_rw_semaphore *);

static inline void percpu_down_read(struct percpu_rw_semaphore *sem)
{
	might_sleep();

	rwsem_acquire_read(&sem->dep_map, 0, 0, _RET_IP_);

	preempt_disable();
	/*
	 * We are in an RCU-sched read-side critical section, so the writer
	 * cannot both switch the rcu_sync state out of the reader fast path
	 * and start checking counters while we are here. So if we see that
	 * rcu_sync_is_idle(), we know that the writer won't be checking
	 * until we're past the preempt_enable() and that once the
	 * synchronize_rcu() is done, the writer will see anything we did
	 * within this RCU-sched read-side critical section.
	 */
	__this_cpu_inc(*sem->read_count);
	if (unlikely(!rcu_sync_is_idle(&sem->rss)))
		__percpu_down_read(sem, false); /* Unconditional memory barrier */
	/*
	 * The barrier() from preempt_enable() prevents the compiler from
	 * bleeding the critical section out.
	 */
	preempt_enable();
}

static inline int percpu_down_read_trylock(struct percpu_rw_semaphore *sem)
{
	int ret = 1;

	preempt_disable();
	/*
	 * Same as in percpu_down_read().
	 */
	__this_cpu_inc(*sem->read_count);
	if (unlikely(!rcu_sync_is_idle(&sem->rss)))
		ret = __percpu_down_read(sem, true); /* Unconditional memory barrier */
	preempt_enable();
	/*
	 * The barrier() from preempt_enable() prevents the compiler from
	 * bleeding the critical section out.
	 */

	if (ret)
		rwsem_acquire_read(&sem->dep_map, 0, 1, _RET_IP_);

	return ret;
}
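
/*
 * Unlike percpu_down_read(), the trylock never sleeps: if a writer
 * holds (or is acquiring) the lock, it backs out and returns 0 instead
 * of blocking on the slowpath rwsem. An illustrative sketch of a
 * hypothetical caller, reusing the mode_rwsem example above:
 *
 *	if (!percpu_down_read_trylock(&mode_rwsem))
 *		return -EAGAIN;
 *	...
 *	percpu_up_read(&mode_rwsem);
 */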

static inline void percpu_up_read(struct percpu_rw_semaphore *sem)
{
	rwsem_release(&sem->dep_map, _RET_IP_);

	preempt_disable();
	/*
	 * Same as in percpu_down_read().
	 */
	if (likely(rcu_sync_is_idle(&sem->rss)))
		__this_cpu_dec(*sem->read_count);
	else
		__percpu_up_read(sem); /* Unconditional memory barrier */
	preempt_enable();
}

extern void percpu_down_write(struct percpu_rw_semaphore *);
extern void percpu_up_write(struct percpu_rw_semaphore *);

extern int __percpu_init_rwsem(struct percpu_rw_semaphore *,
				const char *, struct lock_class_key *);

extern void percpu_free_rwsem(struct percpu_rw_semaphore *);

#define percpu_init_rwsem(sem)					\
({								\
	static struct lock_class_key rwsem_key;		\
	__percpu_init_rwsem(sem, #sem, &rwsem_key);		\
})
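
/*
 * Dynamic initialization allocates the per-cpu read counter, so
 * percpu_init_rwsem() can fail (returning -ENOMEM) and must be paired
 * with percpu_free_rwsem(). An illustrative sketch for a hypothetical
 * dynamically allocated object:
 *
 *	struct foo {
 *		struct percpu_rw_semaphore sem;
 *	};
 *
 *	int foo_init(struct foo *foo)
 *	{
 *		return percpu_init_rwsem(&foo->sem);
 *	}
 *
 *	void foo_destroy(struct foo *foo)
 *	{
 *		percpu_free_rwsem(&foo->sem);
 *	}
 */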

#define percpu_rwsem_is_held(sem)	lockdep_is_held(sem)
#define percpu_rwsem_assert_held(sem)	lockdep_assert_held(sem)

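/*
 * Helpers to hand a held percpu rw_semaphore from one context to
 * another: percpu_rwsem_release() drops the lockdep ownership and, for
 * a writer, sets the underlying rwsem owner to RWSEM_OWNER_UNKNOWN so
 * that optimistic spinners do not keep chasing the old owner task;
 * percpu_rwsem_acquire() re-establishes both in the new context.
 */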
static inline void percpu_rwsem_release(struct percpu_rw_semaphore *sem,
					bool read, unsigned long ip)
{
	lock_release(&sem->dep_map, ip);
#ifdef CONFIG_RWSEM_SPIN_ON_OWNER
	if (!read)
		atomic_long_set(&sem->rw_sem.owner, RWSEM_OWNER_UNKNOWN);
#endif
}

static inline void percpu_rwsem_acquire(struct percpu_rw_semaphore *sem,
					bool read, unsigned long ip)
{
	lock_acquire(&sem->dep_map, 0, 1, read, 1, NULL, ip);
#ifdef CONFIG_RWSEM_SPIN_ON_OWNER
	if (!read)
		atomic_long_set(&sem->rw_sem.owner, (long)current);
#endif
}

#endif