// Copyright 2014 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

// This file is an internal atomic implementation; use base/atomicops.h instead.

// TODO(rmcilroy): Investigate whether we can use __sync_* intrinsics instead of
//                 the hand-coded assembly without introducing perf regressions.
// TODO(rmcilroy): Investigate whether we can use acquire / release versions of
//                 exclusive load / store assembly instructions and do away with
//                 the barriers.
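// Note: the __sync_* builtins (e.g. __sync_val_compare_and_swap) imply a full
// memory barrier, which is why naively replacing the NoBarrier_ variants below
// could regress performance; hence the first TODO above. A hypothetical sketch
// of such a replacement:
//   Atomic32 prev = __sync_val_compare_and_swap(ptr, old_value, new_value);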

#ifndef BASE_ATOMICOPS_INTERNALS_ARM64_GCC_H_
#define BASE_ATOMICOPS_INTERNALS_ARM64_GCC_H_

#if defined(OS_QNX)
#include <sys/cpuinline.h>
#endif

namespace base {
namespace subtle {

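// "dmb ish" is a full data memory barrier for the inner shareable domain:
// it orders all earlier loads and stores against all later ones, as observed
// by every core that shares this memory.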
inline void MemoryBarrier() {
  __asm__ __volatile__ (  // NOLINT
    "dmb ish                                  \n\t"  // Data memory barrier.
    ::: "memory"
  );  // NOLINT
}

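// The read-modify-write operations below are built from LDXR/STXR
// (load-exclusive/store-exclusive) loops: LDXR marks the address for
// exclusive access, and STXR writes 0 to its status register only if nothing
// else has touched the exclusive monitor in between; otherwise the loop
// retries. CLREX clears the monitor on paths that exit without a successful
// store.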
inline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32* ptr,
                                         Atomic32 old_value,
                                         Atomic32 new_value) {
  Atomic32 prev;
  int32_t temp;

  __asm__ __volatile__ (  // NOLINT
    "0:                                    \n\t"
    "ldxr %w[prev], %[ptr]                 \n\t"  // Load the previous value.
    "cmp %w[prev], %w[old_value]           \n\t"
    "bne 1f                                \n\t"
    "stxr %w[temp], %w[new_value], %[ptr]  \n\t"  // Try to store the new value.
    "cbnz %w[temp], 0b                     \n\t"  // Retry if it did not work.
    "1:                                    \n\t"
    "clrex                                 \n\t"  // In case we didn't swap.
    : [prev]"=&r" (prev),
      [temp]"=&r" (temp),
      [ptr]"+Q" (*ptr)
    : [old_value]"r" (old_value),
      [new_value]"r" (new_value)
    : "memory", "cc"
  );  // NOLINT

  return prev;
}

inline Atomic32 NoBarrier_AtomicExchange(volatile Atomic32* ptr,
                                         Atomic32 new_value) {
  Atomic32 result;
  int32_t temp;

  __asm__ __volatile__ (  // NOLINT
    "0:                                    \n\t"
    "ldxr %w[result], %[ptr]               \n\t"  // Load the previous value.
    "stxr %w[temp], %w[new_value], %[ptr]  \n\t"  // Try to store the new value.
    "cbnz %w[temp], 0b                     \n\t"  // Retry if it did not work.
    : [result]"=&r" (result),
      [temp]"=&r" (temp),
      [ptr]"+Q" (*ptr)
    : [new_value]"r" (new_value)
    : "memory"
  );  // NOLINT

  return result;
}

inline Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32* ptr,
                                          Atomic32 increment) {
  Atomic32 result;
  int32_t temp;

  __asm__ __volatile__ (  // NOLINT
    "0:                                       \n\t"
    "ldxr %w[result], %[ptr]                  \n\t"  // Load the previous value.
    "add %w[result], %w[result], %w[increment]\n\t"
    "stxr %w[temp], %w[result], %[ptr]        \n\t"  // Try to store the result.
    "cbnz %w[temp], 0b                        \n\t"  // Retry on failure.
    : [result]"=&r" (result),
      [temp]"=&r" (temp),
      [ptr]"+Q" (*ptr)
    : [increment]"r" (increment)
    : "memory"
  );  // NOLINT

  return result;
}

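// Full-barrier increment: a MemoryBarrier() on each side of the update gives
// the operation both acquire and release semantics.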
inline Atomic32 Barrier_AtomicIncrement(volatile Atomic32* ptr,
                                        Atomic32 increment) {
  MemoryBarrier();
  Atomic32 result = NoBarrier_AtomicIncrement(ptr, increment);
  MemoryBarrier();

  return result;
}

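// Acquire semantics: the 'dmb ish' after a successful exchange prevents later
// memory accesses from being reordered before the compare-and-swap.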
inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr,
                                       Atomic32 old_value,
                                       Atomic32 new_value) {
  Atomic32 prev;
  int32_t temp;

  __asm__ __volatile__ (  // NOLINT
    "0:                                    \n\t"
    "ldxr %w[prev], %[ptr]                 \n\t"  // Load the previous value.
    "cmp %w[prev], %w[old_value]           \n\t"
    "bne 1f                                \n\t"
    "stxr %w[temp], %w[new_value], %[ptr]  \n\t"  // Try to store the new value.
    "cbnz %w[temp], 0b                     \n\t"  // Retry if it did not work.
    "dmb ish                               \n\t"  // Data memory barrier.
    "1:                                    \n\t"
    // If the compare failed, the 'dmb' is unnecessary, but we still need a
    // 'clrex'.
    "clrex                                 \n\t"
    : [prev]"=&r" (prev),
      [temp]"=&r" (temp),
      [ptr]"+Q" (*ptr)
    : [old_value]"r" (old_value),
      [new_value]"r" (new_value)
    : "memory", "cc"
  );  // NOLINT

  return prev;
}

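// Release semantics: the barrier before the exchange loop ensures all earlier
// memory accesses complete before the new value becomes visible.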
inline Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr,
                                       Atomic32 old_value,
                                       Atomic32 new_value) {
  Atomic32 prev;
  int32_t temp;

  MemoryBarrier();

  __asm__ __volatile__ (  // NOLINT
    "0:                                    \n\t"
    "ldxr %w[prev], %[ptr]                 \n\t"  // Load the previous value.
    "cmp %w[prev], %w[old_value]           \n\t"
    "bne 1f                                \n\t"
    "stxr %w[temp], %w[new_value], %[ptr]  \n\t"  // Try to store the new value.
    "cbnz %w[temp], 0b                     \n\t"  // Retry if it did not work.
    "1:                                    \n\t"
    // If the compare failed, we still need a 'clrex'.
    "clrex                                 \n\t"
    : [prev]"=&r" (prev),
      [temp]"=&r" (temp),
      [ptr]"+Q" (*ptr)
    : [old_value]"r" (old_value),
      [new_value]"r" (new_value)
    : "memory", "cc"
  );  // NOLINT

  return prev;
}

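// Plain ARM64 loads and stores carry no ordering guarantees of their own, so
// the Acquire_/Release_ variants below add an explicit barrier around the
// access: Release_Store fences before the store and Acquire_Load fences after
// the load (and symmetrically for Acquire_Store / Release_Load).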
inline void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value) {
  *ptr = value;
}

inline void Acquire_Store(volatile Atomic32* ptr, Atomic32 value) {
  *ptr = value;
  MemoryBarrier();
}

inline void Release_Store(volatile Atomic32* ptr, Atomic32 value) {
  MemoryBarrier();
  *ptr = value;
}

inline Atomic32 NoBarrier_Load(volatile const Atomic32* ptr) {
  return *ptr;
}

inline Atomic32 Acquire_Load(volatile const Atomic32* ptr) {
  Atomic32 value = *ptr;
  MemoryBarrier();
  return value;
}

inline Atomic32 Release_Load(volatile const Atomic32* ptr) {
  MemoryBarrier();
  return *ptr;
}

// 64-bit versions of the operations.
// See the 32-bit versions for comments.

inline Atomic64 NoBarrier_CompareAndSwap(volatile Atomic64* ptr,
                                         Atomic64 old_value,
                                         Atomic64 new_value) {
  Atomic64 prev;
  int32_t temp;

  __asm__ __volatile__ (  // NOLINT
    "0:                                    \n\t"
    "ldxr %[prev], %[ptr]                  \n\t"
    "cmp %[prev], %[old_value]             \n\t"
    "bne 1f                                \n\t"
    "stxr %w[temp], %[new_value], %[ptr]   \n\t"
    "cbnz %w[temp], 0b                     \n\t"
    "1:                                    \n\t"
    "clrex                                 \n\t"
    : [prev]"=&r" (prev),
      [temp]"=&r" (temp),
      [ptr]"+Q" (*ptr)
    : [old_value]"r" (old_value),
      [new_value]"r" (new_value)
    : "memory", "cc"
  );  // NOLINT

  return prev;
}

inline Atomic64 NoBarrier_AtomicExchange(volatile Atomic64* ptr,
                                         Atomic64 new_value) {
  Atomic64 result;
  int32_t temp;

  __asm__ __volatile__ (  // NOLINT
    "0:                                    \n\t"
    "ldxr %[result], %[ptr]                \n\t"
    "stxr %w[temp], %[new_value], %[ptr]   \n\t"
    "cbnz %w[temp], 0b                     \n\t"
    : [result]"=&r" (result),
      [temp]"=&r" (temp),
      [ptr]"+Q" (*ptr)
    : [new_value]"r" (new_value)
    : "memory"
  );  // NOLINT

  return result;
}

inline Atomic64 NoBarrier_AtomicIncrement(volatile Atomic64* ptr,
                                          Atomic64 increment) {
  Atomic64 result;
  int32_t temp;

  __asm__ __volatile__ (  // NOLINT
    "0:                                    \n\t"
    "ldxr %[result], %[ptr]                \n\t"
    "add %[result], %[result], %[increment]\n\t"
    "stxr %w[temp], %[result], %[ptr]      \n\t"
    "cbnz %w[temp], 0b                     \n\t"
    : [result]"=&r" (result),
      [temp]"=&r" (temp),
      [ptr]"+Q" (*ptr)
    : [increment]"r" (increment)
    : "memory"
  );  // NOLINT

  return result;
}

inline Atomic64 Barrier_AtomicIncrement(volatile Atomic64* ptr,
                                        Atomic64 increment) {
  MemoryBarrier();
  Atomic64 result = NoBarrier_AtomicIncrement(ptr, increment);
  MemoryBarrier();

  return result;
}

inline Atomic64 Acquire_CompareAndSwap(volatile Atomic64* ptr,
                                       Atomic64 old_value,
                                       Atomic64 new_value) {
  Atomic64 prev;
  int32_t temp;

  __asm__ __volatile__ (  // NOLINT
    "0:                                    \n\t"
    "ldxr %[prev], %[ptr]                  \n\t"
    "cmp %[prev], %[old_value]             \n\t"
    "bne 1f                                \n\t"
    "stxr %w[temp], %[new_value], %[ptr]   \n\t"
    "cbnz %w[temp], 0b                     \n\t"
    "dmb ish                               \n\t"
    "1:                                    \n\t"
    "clrex                                 \n\t"
    : [prev]"=&r" (prev),
      [temp]"=&r" (temp),
      [ptr]"+Q" (*ptr)
    : [old_value]"r" (old_value),
      [new_value]"r" (new_value)
    : "memory", "cc"
  );  // NOLINT

  return prev;
}

inline Atomic64 Release_CompareAndSwap(volatile Atomic64* ptr,
                                       Atomic64 old_value,
                                       Atomic64 new_value) {
  Atomic64 prev;
  int32_t temp;

  MemoryBarrier();

  __asm__ __volatile__ (  // NOLINT
    "0:                                    \n\t"
    "ldxr %[prev], %[ptr]                  \n\t"
    "cmp %[prev], %[old_value]             \n\t"
    "bne 1f                                \n\t"
    "stxr %w[temp], %[new_value], %[ptr]   \n\t"
    "cbnz %w[temp], 0b                     \n\t"
    "1:                                    \n\t"
    "clrex                                 \n\t"
    : [prev]"=&r" (prev),
      [temp]"=&r" (temp),
      [ptr]"+Q" (*ptr)
    : [old_value]"r" (old_value),
      [new_value]"r" (new_value)
    : "memory", "cc"
  );  // NOLINT

  return prev;
}

inline void NoBarrier_Store(volatile Atomic64* ptr, Atomic64 value) {
  *ptr = value;
}

inline void Acquire_Store(volatile Atomic64* ptr, Atomic64 value) {
  *ptr = value;
  MemoryBarrier();
}

inline void Release_Store(volatile Atomic64* ptr, Atomic64 value) {
  MemoryBarrier();
  *ptr = value;
}

inline Atomic64 NoBarrier_Load(volatile const Atomic64* ptr) {
  return *ptr;
}

inline Atomic64 Acquire_Load(volatile const Atomic64* ptr) {
  Atomic64 value = *ptr;
  MemoryBarrier();
  return value;
}

inline Atomic64 Release_Load(volatile const Atomic64* ptr) {
  MemoryBarrier();
  return *ptr;
}

}  // namespace base::subtle
}  // namespace base

#endif  // BASE_ATOMICOPS_INTERNALS_ARM64_GCC_H_