// Copyright (c) 2009 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include <assert.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>   // for memset()
#include <algorithm>  // for min()
#include <new>        // for std::bad_alloc and std::set_new_handler
#include "base/atomicops.h"
#include "base/logging.h"
#include "testing/gtest/include/gtest/gtest.h"
// Number of bits in a size_t.
static const int kSizeBits = 8 * sizeof(size_t);
// The maximum size of a size_t.
static const size_t kMaxSize = ~static_cast<size_t>(0);
// Maximum positive size of a size_t if it were signed.
static const size_t kMaxSignedSize = ((size_t(1) << (kSizeBits - 1)) - 1);
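// For example, with a 32-bit size_t: kSizeBits = 32, kMaxSize = 0xFFFFFFFF,
// and kMaxSignedSize = 0x7FFFFFFF.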
// An allocation size which is not too big to be reasonable.
static const size_t kNotTooBig = 100000;
// An allocation size which is just too big.
static const size_t kTooBig = ~static_cast<size_t>(0);

namespace {

using std::min;

// Fill a buffer of the specified size with a predetermined pattern.
static void Fill(unsigned char* buffer, int n) {
  for (int i = 0; i < n; i++) {
    buffer[i] = (i & 0xff);
  }
}

// Check that the specified buffer has the predetermined pattern
// generated by Fill().
static bool Valid(unsigned char* buffer, int n) {
  for (int i = 0; i < n; i++) {
    if (buffer[i] != (i & 0xff)) {
      return false;
    }
  }
  return true;
}

// Check that a buffer is completely zeroed.
static bool IsZeroed(unsigned char* buffer, int n) {
  for (int i = 0; i < n; i++) {
    if (buffer[i] != 0) {
      return false;
    }
  }
  return true;
}

// Check alignment.
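// Note: the mask test below assumes align is a power of two; for a
// power-of-two align, p is aligned iff its low log2(align) bits are zero.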
static void CheckAlignment(void* p, int align) {
  EXPECT_EQ(0u, reinterpret_cast<uintptr_t>(p) & (align - 1));
}

// Return the next interesting size/delta to check.  Returns -1 if no more.
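// Starting from 1 this yields 1, 2, ..., 100, then a triple around each
// power of two: 127, 128, 129, 255, 256, 257, ..., 65535, 65536, 65537,
// then 131071, and finally -1 once the size reaches 100000 or more.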
static int NextSize(int size) {
  if (size < 100)
    return size + 1;

  if (size < 100000) {
    // Find next power of two.
    int power = 1;
    while (power < size)
      power <<= 1;

    // Yield (power-1, power, power+1).
    if (size < power - 1)
      return power - 1;

    if (size == power - 1)
      return power;

    assert(size == power);
    return power + 1;
  } else {
    return -1;
  }
}

#define GG_ULONGLONG(x) static_cast<uint64>(x)

template <class AtomicType>
static void TestAtomicIncrement() {
  // For now, we just test single-threaded execution.

  // Use a guard value to make sure that NoBarrier_AtomicIncrement doesn't
  // go outside the expected address bounds.  In particular, this tests that
  // some future change to the asm code doesn't cause the 32-bit
  // NoBarrier_AtomicIncrement to do the wrong thing on 64-bit machines.
  struct {
    AtomicType prev_word;
    AtomicType count;
    AtomicType next_word;
  } s;

  AtomicType prev_word_value, next_word_value;
  memset(&prev_word_value, 0xFF, sizeof(AtomicType));
  memset(&next_word_value, 0xEE, sizeof(AtomicType));

  s.prev_word = prev_word_value;
  s.count = 0;
  s.next_word = next_word_value;

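  // Each step below checks three things: the value returned by
  // NoBarrier_AtomicIncrement, the stored count, and that the two guard
  // words on either side of the count were left untouched.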
  EXPECT_EQ(base::subtle::NoBarrier_AtomicIncrement(&s.count, 1), 1);
  EXPECT_EQ(s.count, 1);
  EXPECT_EQ(s.prev_word, prev_word_value);
  EXPECT_EQ(s.next_word, next_word_value);

  EXPECT_EQ(base::subtle::NoBarrier_AtomicIncrement(&s.count, 2), 3);
  EXPECT_EQ(s.count, 3);
  EXPECT_EQ(s.prev_word, prev_word_value);
  EXPECT_EQ(s.next_word, next_word_value);

  EXPECT_EQ(base::subtle::NoBarrier_AtomicIncrement(&s.count, 3), 6);
  EXPECT_EQ(s.count, 6);
  EXPECT_EQ(s.prev_word, prev_word_value);
  EXPECT_EQ(s.next_word, next_word_value);

  EXPECT_EQ(base::subtle::NoBarrier_AtomicIncrement(&s.count, -3), 3);
  EXPECT_EQ(s.count, 3);
  EXPECT_EQ(s.prev_word, prev_word_value);
  EXPECT_EQ(s.next_word, next_word_value);

  EXPECT_EQ(base::subtle::NoBarrier_AtomicIncrement(&s.count, -2), 1);
  EXPECT_EQ(s.count, 1);
  EXPECT_EQ(s.prev_word, prev_word_value);
  EXPECT_EQ(s.next_word, next_word_value);

  EXPECT_EQ(base::subtle::NoBarrier_AtomicIncrement(&s.count, -1), 0);
  EXPECT_EQ(s.count, 0);
  EXPECT_EQ(s.prev_word, prev_word_value);
  EXPECT_EQ(s.next_word, next_word_value);

  EXPECT_EQ(base::subtle::NoBarrier_AtomicIncrement(&s.count, -1), -1);
  EXPECT_EQ(s.count, -1);
  EXPECT_EQ(s.prev_word, prev_word_value);
  EXPECT_EQ(s.next_word, next_word_value);

  EXPECT_EQ(base::subtle::NoBarrier_AtomicIncrement(&s.count, -4), -5);
  EXPECT_EQ(s.count, -5);
  EXPECT_EQ(s.prev_word, prev_word_value);
  EXPECT_EQ(s.next_word, next_word_value);

  EXPECT_EQ(base::subtle::NoBarrier_AtomicIncrement(&s.count, 5), 0);
  EXPECT_EQ(s.count, 0);
  EXPECT_EQ(s.prev_word, prev_word_value);
  EXPECT_EQ(s.next_word, next_word_value);
}


#define NUM_BITS(T) (sizeof(T) * 8)


template <class AtomicType>
static void TestCompareAndSwap() {
  AtomicType value = 0;
  AtomicType prev = base::subtle::NoBarrier_CompareAndSwap(&value, 0, 1);
  EXPECT_EQ(1, value);
  EXPECT_EQ(0, prev);

  // Use a test value that has non-zero bits in both halves, mainly to
  // exercise the 64-bit implementation on 32-bit platforms.
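  // For Atomic64 this is (1 << 62) + 11 = 0x400000000000000B, so both the
  // upper and lower 32-bit words are non-zero.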
  const AtomicType k_test_val = (GG_ULONGLONG(1) <<
                                 (NUM_BITS(AtomicType) - 2)) + 11;
  value = k_test_val;
  prev = base::subtle::NoBarrier_CompareAndSwap(&value, 0, 5);
  EXPECT_EQ(k_test_val, value);
  EXPECT_EQ(k_test_val, prev);

  value = k_test_val;
  prev = base::subtle::NoBarrier_CompareAndSwap(&value, k_test_val, 5);
  EXPECT_EQ(5, value);
  EXPECT_EQ(k_test_val, prev);
}


template <class AtomicType>
static void TestAtomicExchange() {
  AtomicType value = 0;
  AtomicType new_value = base::subtle::NoBarrier_AtomicExchange(&value, 1);
  EXPECT_EQ(1, value);
  EXPECT_EQ(0, new_value);

  // Use a test value that has non-zero bits in both halves, mainly to
  // exercise the 64-bit implementation on 32-bit platforms.
  const AtomicType k_test_val = (GG_ULONGLONG(1) <<
                                 (NUM_BITS(AtomicType) - 2)) + 11;
  value = k_test_val;
  new_value = base::subtle::NoBarrier_AtomicExchange(&value, k_test_val);
  EXPECT_EQ(k_test_val, value);
  EXPECT_EQ(k_test_val, new_value);

  value = k_test_val;
  new_value = base::subtle::NoBarrier_AtomicExchange(&value, 5);
  EXPECT_EQ(5, value);
  EXPECT_EQ(k_test_val, new_value);
}


template <class AtomicType>
static void TestAtomicIncrementBounds() {
  // Test increment at the half-width boundary of the atomic type.  This is
  // primarily meant to test the 32-bit boundary of a 64-bit atomic type.
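  // For Atomic64 the boundary is 2^32: incrementing 0xFFFFFFFF must carry
  // into the upper 32-bit word rather than wrap the lower word to zero.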
  AtomicType test_val = GG_ULONGLONG(1) << (NUM_BITS(AtomicType) / 2);
  AtomicType value = test_val - 1;
  AtomicType new_value = base::subtle::NoBarrier_AtomicIncrement(&value, 1);
  EXPECT_EQ(test_val, value);
  EXPECT_EQ(value, new_value);

  base::subtle::NoBarrier_AtomicIncrement(&value, -1);
  EXPECT_EQ(test_val - 1, value);
}

// This is a simple sanity check that stored values are correct.  It does
// not test atomicity.
template <class AtomicType>
static void TestStore() {
  const AtomicType kVal1 = static_cast<AtomicType>(0xa5a5a5a5a5a5a5a5LL);
  const AtomicType kVal2 = static_cast<AtomicType>(-1);

  AtomicType value;

  base::subtle::NoBarrier_Store(&value, kVal1);
  EXPECT_EQ(kVal1, value);
  base::subtle::NoBarrier_Store(&value, kVal2);
  EXPECT_EQ(kVal2, value);

  base::subtle::Acquire_Store(&value, kVal1);
  EXPECT_EQ(kVal1, value);
  base::subtle::Acquire_Store(&value, kVal2);
  EXPECT_EQ(kVal2, value);

  base::subtle::Release_Store(&value, kVal1);
  EXPECT_EQ(kVal1, value);
  base::subtle::Release_Store(&value, kVal2);
  EXPECT_EQ(kVal2, value);
}

// This is a simple sanity check that loaded values are correct.  It does
// not test atomicity.
template <class AtomicType>
static void TestLoad() {
  const AtomicType kVal1 = static_cast<AtomicType>(0xa5a5a5a5a5a5a5a5LL);
  const AtomicType kVal2 = static_cast<AtomicType>(-1);

  AtomicType value;

  value = kVal1;
  EXPECT_EQ(kVal1, base::subtle::NoBarrier_Load(&value));
  value = kVal2;
  EXPECT_EQ(kVal2, base::subtle::NoBarrier_Load(&value));

  value = kVal1;
  EXPECT_EQ(kVal1, base::subtle::Acquire_Load(&value));
  value = kVal2;
  EXPECT_EQ(kVal2, base::subtle::Acquire_Load(&value));

  value = kVal1;
  EXPECT_EQ(kVal1, base::subtle::Release_Load(&value));
  value = kVal2;
  EXPECT_EQ(kVal2, base::subtle::Release_Load(&value));
}

template <class AtomicType>
static void TestAtomicOps() {
  TestCompareAndSwap<AtomicType>();
  TestAtomicExchange<AtomicType>();
  TestAtomicIncrementBounds<AtomicType>();
  TestStore<AtomicType>();
  TestLoad<AtomicType>();
}

static void TestCalloc(size_t n, size_t s, bool ok) {
  char* p = reinterpret_cast<char*>(calloc(n, s));
  if (!ok) {
    EXPECT_EQ(NULL, p) << "calloc(n, s) should not succeed";
  } else {
    EXPECT_NE(reinterpret_cast<void*>(NULL), p) <<
        "calloc(n, s) should succeed";
    for (size_t i = 0; i < n * s; i++) {
      EXPECT_EQ('\0', p[i]);
    }
    free(p);
  }
}


// A global test counter for the number of times the new_handler is called.
static int news_handled = 0;
static void TestNewHandler() {
  ++news_handled;
  throw std::bad_alloc();
}
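
// When operator new cannot satisfy a request it calls the installed
// new_handler in a loop; the handler must free up memory, install a
// different handler, or throw std::bad_alloc (as TestNewHandler does).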

// Chromium normally builds without C++ exceptions, so plain operator new is
// expected to return NULL on failure rather than throw.  When a new_handler
// that throws has been installed, the failure is expected to surface as an
// exception instead; should_throw selects which behavior we expect.
static void TestOneNewWithoutExceptions(void* (*func)(size_t),
                                        bool should_throw) {
  // Success test.
  try {
    void* ptr = (*func)(kNotTooBig);
    EXPECT_NE(reinterpret_cast<void*>(NULL), ptr) <<
        "allocation should not have failed.";
  } catch (...) {
    ADD_FAILURE() << "allocation threw unexpected exception.";
  }

  // Failure test.
  try {
    void* rv = (*func)(kTooBig);
    EXPECT_EQ(NULL, rv);
    EXPECT_FALSE(should_throw) << "allocation should have thrown.";
  } catch (...) {
    EXPECT_TRUE(should_throw) << "allocation threw unexpected exception.";
  }
}

static void TestNothrowNew(void* (*func)(size_t)) {
  news_handled = 0;

  // Test without a new_handler:
  std::new_handler saved_handler = std::set_new_handler(0);
  TestOneNewWithoutExceptions(func, false);

  // Test with a new_handler:
  std::set_new_handler(TestNewHandler);
  TestOneNewWithoutExceptions(func, true);
  EXPECT_EQ(news_handled, 1) << "nothrow new_handler was not called.";
  std::set_new_handler(saved_handler);
}

}  // namespace

//-----------------------------------------------------------------------------

TEST(Atomics, AtomicIncrementWord) {
  TestAtomicIncrement<AtomicWord>();
}

TEST(Atomics, AtomicIncrement32) {
  TestAtomicIncrement<Atomic32>();
}

TEST(Atomics, AtomicOpsWord) {
  TestAtomicOps<AtomicWord>();
}

TEST(Atomics, AtomicOps32) {
  TestAtomicOps<Atomic32>();
}

TEST(Allocators, Malloc) {
  // Try allocating data with a bunch of alignments and sizes.
  for (int size = 1; size < 1048576; size *= 2) {
    unsigned char* ptr = reinterpret_cast<unsigned char*>(malloc(size));
    CheckAlignment(ptr, 2);  // Should be 2-byte aligned.
    Fill(ptr, size);
    EXPECT_TRUE(Valid(ptr, size));
    free(ptr);
  }
}

TEST(Allocators, Calloc) {
  TestCalloc(0, 0, true);
  TestCalloc(0, 1, true);
  TestCalloc(1, 1, true);
  TestCalloc(1 << 10, 0, true);
  TestCalloc(1 << 20, 0, true);
  TestCalloc(0, 1 << 10, true);
  TestCalloc(0, 1 << 20, true);
  TestCalloc(1 << 20, 2, true);
  TestCalloc(2, 1 << 20, true);
  TestCalloc(1000, 1000, true);

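  // Each of the following products overflows size_t, so calloc must detect
  // the overflow and fail rather than return an undersized buffer.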
  TestCalloc(kMaxSize, 2, false);
  TestCalloc(2, kMaxSize, false);
  TestCalloc(kMaxSize, kMaxSize, false);

  TestCalloc(kMaxSignedSize, 3, false);
  TestCalloc(3, kMaxSignedSize, false);
  TestCalloc(kMaxSignedSize, kMaxSignedSize, false);
}

TEST(Allocators, New) {
  TestNothrowNew(&::operator new);
  TestNothrowNew(&::operator new[]);
}

// This makes sure that reallocing a small number of bytes in either
// direction doesn't cause us to allocate new memory.
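// This relies on tcmalloc rounding allocations up to size classes: a small
// size change that stays within the current size class can be satisfied in
// place, so realloc should return the original pointer.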
TEST(Allocators, Realloc1) {
  int start_sizes[] = { 100, 1000, 10000, 100000 };
  int deltas[] = { 1, -2, 4, -8, 16, -32, 64, -128 };
  const int num_start_sizes =
      static_cast<int>(sizeof(start_sizes) / sizeof(*start_sizes));

  for (int s = 0; s < num_start_sizes; ++s) {
    void* p = malloc(start_sizes[s]);
    ASSERT_TRUE(p);
    // The larger the start-size, the larger the non-reallocing delta.
    for (int d = 0; d < s * 2; ++d) {
      void* new_p = realloc(p, start_sizes[s] + deltas[d]);
      ASSERT_EQ(p, new_p);  // realloc should not allocate new memory
    }
    // Test again, but this time reallocing smaller first.
    for (int d = 0; d < s * 2; ++d) {
      void* new_p = realloc(p, start_sizes[s] - deltas[d]);
      ASSERT_EQ(p, new_p);  // realloc should not allocate new memory
    }
    free(p);
  }
}

TEST(Allocators, Realloc2) {
  for (int src_size = 0; src_size >= 0; src_size = NextSize(src_size)) {
    for (int dst_size = 0; dst_size >= 0; dst_size = NextSize(dst_size)) {
      unsigned char* src = reinterpret_cast<unsigned char*>(malloc(src_size));
      Fill(src, src_size);
      unsigned char* dst =
          reinterpret_cast<unsigned char*>(realloc(src, dst_size));
      EXPECT_TRUE(Valid(dst, min(src_size, dst_size)));
      Fill(dst, dst_size);
      EXPECT_TRUE(Valid(dst, dst_size));
      if (dst != NULL) free(dst);
    }
  }

  // Now make sure realloc works correctly even when we overflow the
  // packed cache, so some entries are evicted from the cache.
  // The cache has 2^12 entries, keyed by page number.
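  // Allocating 1 << 14 distinct blocks therefore guarantees evictions:
  // at most 2^12 page entries can be cached at any one time.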
  const int kNumEntries = 1 << 14;
  int** p = reinterpret_cast<int**>(malloc(sizeof(*p) * kNumEntries));
  int sum = 0;
  for (int i = 0; i < kNumEntries; i++) {
    // No page size is likely to be bigger than 8192 bytes.
    p[i] = reinterpret_cast<int*>(malloc(8192));
    p[i][1000] = i;  // Use memory deep in the heart of p.
  }
  for (int i = 0; i < kNumEntries; i++) {
    p[i] = reinterpret_cast<int*>(realloc(p[i], 9000));
  }
  for (int i = 0; i < kNumEntries; i++) {
    sum += p[i][1000];
    free(p[i]);
  }
  // Sum of 0..kNumEntries-1; kNumEntries is even, so the division is exact.
  EXPECT_EQ(kNumEntries / 2 * (kNumEntries - 1), sum);
  free(p);
}

TEST(Allocators, ReallocZero) {
  // Test that realloc to zero does not return NULL.
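  // The C standard would also permit realloc(ptr, 0) to return NULL after
  // freeing ptr; this allocator instead returns a valid, freeable pointer,
  // and the loop below depends on that guarantee.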
  for (int size = 0; size >= 0; size = NextSize(size)) {
    char* ptr = reinterpret_cast<char*>(malloc(size));
    EXPECT_NE(static_cast<char*>(NULL), ptr);
    ptr = reinterpret_cast<char*>(realloc(ptr, 0));
    EXPECT_NE(static_cast<char*>(NULL), ptr);
    if (ptr)
      free(ptr);
  }
}

#ifdef WIN32
// Test recalloc.
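// _recalloc is a Windows CRT extension that combines realloc and calloc:
// it resizes the block and zero-fills any newly allocated bytes, which is
// what the IsZeroed() check below verifies for a fresh allocation.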
TEST(Allocators, Recalloc) {
  for (int src_size = 0; src_size >= 0; src_size = NextSize(src_size)) {
    for (int dst_size = 0; dst_size >= 0; dst_size = NextSize(dst_size)) {
      unsigned char* src =
          reinterpret_cast<unsigned char*>(_recalloc(NULL, 1, src_size));
      EXPECT_TRUE(IsZeroed(src, src_size));
      Fill(src, src_size);
      unsigned char* dst =
          reinterpret_cast<unsigned char*>(_recalloc(src, 1, dst_size));
      EXPECT_TRUE(Valid(dst, min(src_size, dst_size)));
      Fill(dst, dst_size);
      EXPECT_TRUE(Valid(dst, dst_size));
      if (dst != NULL)
        free(dst);
    }
  }
}
#endif


int main(int argc, char** argv) {
  testing::InitGoogleTest(&argc, argv);
  return RUN_ALL_TESTS();
}