gpu: Allow waiting on sync tokens without sync token client.

This CL adds SyncPointManager::Wait which takes a sync token and an
order number. This enables the upcoming gpu scheduler to wait on sync
tokens that a flush depends on without executing the flush. Such waits
are safe from deadlock if the order number of the message is used.

Misc cleanup in this CL:
1. Hide SyncPointClientState and instead use sync tokens for waiting.
2. The wait methods do not run the callback if the wait is invalid.
3. The cmd decoder wait sync token callback returns true on valid waits.
4. Added a few more cmd buffer message types that don't need MakeCurrent.
5. Changed the sync point tests to use sync tokens everywhere.

[email protected]
BUG=514813
CQ_INCLUDE_TRYBOTS=master.tryserver.chromium.linux:linux_optional_gpu_tests_rel;master.tryserver.chromium.mac:mac_optional_gpu_tests_rel;master.tryserver.chromium.win:win_optional_gpu_tests_rel

Review-Url: https://codereview.chromium.org/2722883002
Cr-Commit-Position: refs/heads/master@{#454361}
diff --git a/android_webview/browser/deferred_gpu_command_service.cc b/android_webview/browser/deferred_gpu_command_service.cc
index 6f0a146..648c3c3 100644
--- a/android_webview/browser/deferred_gpu_command_service.cc
+++ b/android_webview/browser/deferred_gpu_command_service.cc
@@ -71,8 +71,7 @@
 DeferredGpuCommandService::DeferredGpuCommandService()
     : gpu::InProcessCommandBuffer::Service(
           content::GetGpuPreferencesFromCommandLine()),
-      sync_point_manager_(new gpu::SyncPointManager(true)) {
-}
+      sync_point_manager_(new gpu::SyncPointManager()) {}
 
 DeferredGpuCommandService::~DeferredGpuCommandService() {
   base::AutoLock lock(tasks_lock_);
diff --git a/gpu/command_buffer/service/gles2_cmd_decoder.cc b/gpu/command_buffer/service/gles2_cmd_decoder.cc
index aad78dd..15aa3b1b 100644
--- a/gpu/command_buffer/service/gles2_cmd_decoder.cc
+++ b/gpu/command_buffer/service/gles2_cmd_decoder.cc
@@ -31,6 +31,7 @@
 #include "gpu/command_buffer/common/gles2_cmd_format.h"
 #include "gpu/command_buffer/common/gles2_cmd_utils.h"
 #include "gpu/command_buffer/common/mailbox.h"
+#include "gpu/command_buffer/common/sync_token.h"
 #include "gpu/command_buffer/service/buffer_manager.h"
 #include "gpu/command_buffer/service/context_group.h"
 #include "gpu/command_buffer/service/context_state.h"
@@ -594,7 +595,7 @@
   void SetShaderCacheCallback(const ShaderCacheCallback& callback) override;
   void SetFenceSyncReleaseCallback(
       const FenceSyncReleaseCallback& callback) override;
-  void SetWaitFenceSyncCallback(const WaitFenceSyncCallback& callback) override;
+  void SetWaitSyncTokenCallback(const WaitSyncTokenCallback& callback) override;
 
   void SetDescheduleUntilFinishedCallback(
       const NoParamCallback& callback) override;
@@ -2302,7 +2303,7 @@
   std::unique_ptr<ImageManager> image_manager_;
 
   FenceSyncReleaseCallback fence_sync_release_callback_;
-  WaitFenceSyncCallback wait_fence_sync_callback_;
+  WaitSyncTokenCallback wait_sync_token_callback_;
   NoParamCallback deschedule_until_finished_callback_;
   NoParamCallback reschedule_after_finished_callback_;
 
@@ -4660,9 +4661,9 @@
   fence_sync_release_callback_ = callback;
 }
 
-void GLES2DecoderImpl::SetWaitFenceSyncCallback(
-    const WaitFenceSyncCallback& callback) {
-  wait_fence_sync_callback_ = callback;
+void GLES2DecoderImpl::SetWaitSyncTokenCallback(
+    const WaitSyncTokenCallback& callback) {
+  wait_sync_token_callback_ = callback;
 }
 
 void GLES2DecoderImpl::SetDescheduleUntilFinishedCallback(
@@ -15822,12 +15823,14 @@
   const CommandBufferId command_buffer_id =
       CommandBufferId::FromUnsafeValue(c.command_buffer_id());
   const uint64_t release = c.release_count();
-  if (wait_fence_sync_callback_.is_null())
+  if (wait_sync_token_callback_.is_null())
     return error::kNoError;
 
-  return wait_fence_sync_callback_.Run(namespace_id, command_buffer_id, release)
-             ? error::kNoError
-             : error::kDeferCommandUntilLater;
+  gpu::SyncToken sync_token;
+  sync_token.Set(namespace_id, 0, command_buffer_id, release);
+  return wait_sync_token_callback_.Run(sync_token)
+             ? error::kDeferCommandUntilLater
+             : error::kNoError;
 }
 
 error::Error GLES2DecoderImpl::HandleDiscardBackbufferCHROMIUM(
diff --git a/gpu/command_buffer/service/gles2_cmd_decoder.h b/gpu/command_buffer/service/gles2_cmd_decoder.h
index c53b522..d7d8a23 100644
--- a/gpu/command_buffer/service/gles2_cmd_decoder.h
+++ b/gpu/command_buffer/service/gles2_cmd_decoder.h
@@ -36,6 +36,7 @@
 namespace gpu {
 
 struct Mailbox;
+struct SyncToken;
 
 namespace gles2 {
 
@@ -85,10 +86,7 @@
  public:
   typedef error::Error Error;
   typedef base::Callback<void(uint64_t release)> FenceSyncReleaseCallback;
-  typedef base::Callback<bool(gpu::CommandBufferNamespace namespace_id,
-                              gpu::CommandBufferId command_buffer_id,
-                              uint64_t release)>
-      WaitFenceSyncCallback;
+  typedef base::Callback<bool(const gpu::SyncToken&)> WaitSyncTokenCallback;
   typedef base::Callback<void(void)> NoParamCallback;
 
   // The default stencil mask, which has all bits set.  This really should be a
@@ -281,11 +279,12 @@
   virtual void SetShaderCacheCallback(const ShaderCacheCallback& callback) = 0;
 
   // Sets the callback for fence sync release and wait calls. The wait call
-  // returns true if the channel is still scheduled.
+  // returns false if the wait was a nop or invalid and the command buffer is
+  // still scheduled.
   virtual void SetFenceSyncReleaseCallback(
       const FenceSyncReleaseCallback& callback) = 0;
-  virtual void SetWaitFenceSyncCallback(
-      const WaitFenceSyncCallback& callback) = 0;
+  virtual void SetWaitSyncTokenCallback(
+      const WaitSyncTokenCallback& callback) = 0;
 
   // Sets the callback for the DescheduleUntilFinished and
   // RescheduleAfterFinished calls.
diff --git a/gpu/command_buffer/service/gles2_cmd_decoder_mock.h b/gpu/command_buffer/service/gles2_cmd_decoder_mock.h
index 206f6d3..64ca1a8 100644
--- a/gpu/command_buffer/service/gles2_cmd_decoder_mock.h
+++ b/gpu/command_buffer/service/gles2_cmd_decoder_mock.h
@@ -149,8 +149,8 @@
                void(const ShaderCacheCallback& callback));
   MOCK_METHOD1(SetFenceSyncReleaseCallback,
                void(const FenceSyncReleaseCallback& callback));
-  MOCK_METHOD1(SetWaitFenceSyncCallback,
-               void(const WaitFenceSyncCallback& callback));
+  MOCK_METHOD1(SetWaitSyncTokenCallback,
+               void(const WaitSyncTokenCallback& callback));
   MOCK_METHOD1(SetDescheduleUntilFinishedCallback,
                void(const NoParamCallback& callback));
   MOCK_METHOD1(SetRescheduleAfterFinishedCallback,
diff --git a/gpu/command_buffer/service/gles2_cmd_decoder_passthrough.cc b/gpu/command_buffer/service/gles2_cmd_decoder_passthrough.cc
index 178f369..86f13e5 100644
--- a/gpu/command_buffer/service/gles2_cmd_decoder_passthrough.cc
+++ b/gpu/command_buffer/service/gles2_cmd_decoder_passthrough.cc
@@ -425,9 +425,9 @@
   fence_sync_release_callback_ = callback;
 }
 
-void GLES2DecoderPassthroughImpl::SetWaitFenceSyncCallback(
-    const WaitFenceSyncCallback& callback) {
-  wait_fence_sync_callback_ = callback;
+void GLES2DecoderPassthroughImpl::SetWaitSyncTokenCallback(
+    const WaitSyncTokenCallback& callback) {
+  wait_sync_token_callback_ = callback;
 }
 
 void GLES2DecoderPassthroughImpl::SetDescheduleUntilFinishedCallback(
diff --git a/gpu/command_buffer/service/gles2_cmd_decoder_passthrough.h b/gpu/command_buffer/service/gles2_cmd_decoder_passthrough.h
index 0193b4aa..cd7ef379 100644
--- a/gpu/command_buffer/service/gles2_cmd_decoder_passthrough.h
+++ b/gpu/command_buffer/service/gles2_cmd_decoder_passthrough.h
@@ -12,6 +12,7 @@
 #include "gpu/command_buffer/common/gles2_cmd_format.h"
 #include "gpu/command_buffer/common/gles2_cmd_utils.h"
 #include "gpu/command_buffer/common/mailbox.h"
+#include "gpu/command_buffer/common/sync_token.h"
 #include "gpu/command_buffer/service/client_service_map.h"
 #include "gpu/command_buffer/service/context_group.h"
 #include "gpu/command_buffer/service/gles2_cmd_decoder.h"
@@ -134,7 +135,7 @@
   // returns true if the channel is still scheduled.
   void SetFenceSyncReleaseCallback(
       const FenceSyncReleaseCallback& callback) override;
-  void SetWaitFenceSyncCallback(const WaitFenceSyncCallback& callback) override;
+  void SetWaitSyncTokenCallback(const WaitSyncTokenCallback& callback) override;
   void SetDescheduleUntilFinishedCallback(
       const NoParamCallback& callback) override;
   void SetRescheduleAfterFinishedCallback(
@@ -327,7 +328,7 @@
 
   // Callbacks
   FenceSyncReleaseCallback fence_sync_release_callback_;
-  WaitFenceSyncCallback wait_fence_sync_callback_;
+  WaitSyncTokenCallback wait_sync_token_callback_;
 
   // Some objects may generate resources when they are bound even if they were
   // not generated yet: texture, buffer, renderbuffer, framebuffer, transform
diff --git a/gpu/command_buffer/service/gles2_cmd_decoder_passthrough_doers.cc b/gpu/command_buffer/service/gles2_cmd_decoder_passthrough_doers.cc
index 8604e0e0..74c7a4a6 100644
--- a/gpu/command_buffer/service/gles2_cmd_decoder_passthrough_doers.cc
+++ b/gpu/command_buffer/service/gles2_cmd_decoder_passthrough_doers.cc
@@ -3097,13 +3097,13 @@
     CommandBufferNamespace namespace_id,
     CommandBufferId command_buffer_id,
     GLuint64 release_count) {
-  if (wait_fence_sync_callback_.is_null()) {
+  if (wait_sync_token_callback_.is_null()) {
     return error::kNoError;
   }
-  return wait_fence_sync_callback_.Run(namespace_id, command_buffer_id,
-                                       release_count)
-             ? error::kNoError
-             : error::kDeferCommandUntilLater;
+  SyncToken sync_token(namespace_id, 0, command_buffer_id, release_count);
+  return wait_sync_token_callback_.Run(sync_token)
+             ? error::kDeferCommandUntilLater
+             : error::kNoError;
 }
 
 error::Error GLES2DecoderPassthroughImpl::DoDrawBuffersEXT(
diff --git a/gpu/command_buffer/service/sync_point_manager.cc b/gpu/command_buffer/service/sync_point_manager.cc
index 04a92e0..85210e8 100644
--- a/gpu/command_buffer/service/sync_point_manager.cc
+++ b/gpu/command_buffer/service/sync_point_manager.cc
@@ -8,15 +8,10 @@
 #include <stddef.h>
 #include <stdint.h>
 
-#include <climits>
-
 #include "base/bind.h"
-#include "base/containers/hash_tables.h"
 #include "base/location.h"
 #include "base/logging.h"
-#include "base/memory/ptr_util.h"
-#include "base/rand_util.h"
-#include "base/sequence_checker.h"
+#include "base/memory/ref_counted.h"
 #include "base/single_thread_task_runner.h"
 
 namespace gpu {
@@ -146,12 +141,7 @@
 
 SyncPointOrderData::OrderFence::~OrderFence() {}
 
-SyncPointOrderData::SyncPointOrderData()
-    : current_order_num_(0),
-      paused_(false),
-      destroyed_(false),
-      processed_order_num_(0),
-      unprocessed_order_num_(0) {}
+SyncPointOrderData::SyncPointOrderData() {}
 
 SyncPointOrderData::~SyncPointOrderData() {}
 
@@ -164,8 +154,8 @@
   if (destroyed_)
     return false;
 
-  // Release should have a possible unprocessed order number lower
-  // than the wait order number.
+  // Release should have a possible unprocessed order number lower than the wait
+  // order number.
   if ((processed_order_num_ + 1) >= wait_order_num)
     return false;
 
@@ -175,7 +165,7 @@
 
   // So far it could be valid, but add an order fence guard to be sure it
   // gets released eventually.
-  const uint32_t expected_order_num =
+  uint32_t expected_order_num =
       std::min(unprocessed_order_num_, wait_order_num);
   order_fence_queue_.push(OrderFence(expected_order_num, fence_release,
                                      release_callback, client_state));
@@ -194,37 +184,32 @@
 
 SyncPointClientState::SyncPointClientState(
     scoped_refptr<SyncPointOrderData> order_data)
-    : order_data_(order_data), fence_sync_release_(0) {}
+    : order_data_(order_data) {}
 
-SyncPointClientState::~SyncPointClientState() {
+SyncPointClientState::~SyncPointClientState() {}
+
+bool SyncPointClientState::IsFenceSyncReleased(uint64_t release) {
+  base::AutoLock lock(fence_sync_lock_);
+  return release <= fence_sync_release_;
 }
 
-bool SyncPointClientState::WaitForRelease(CommandBufferNamespace namespace_id,
-                                          CommandBufferId client_id,
+bool SyncPointClientState::WaitForRelease(uint64_t release,
                                           uint32_t wait_order_num,
-                                          uint64_t release,
                                           const base::Closure& callback) {
   // Lock must be held the whole time while we validate otherwise it could be
   // released while we are checking.
   {
     base::AutoLock auto_lock(fence_sync_lock_);
-    if (release > fence_sync_release_) {
-      if (!order_data_->ValidateReleaseOrderNumber(this, wait_order_num,
-                                                   release, callback)) {
-        return false;
-      } else {
-        // Add the callback which will be called upon release.
-        release_callback_queue_.push(ReleaseCallback(release, callback));
-        if (!on_wait_callback_.is_null())
-          on_wait_callback_.Run(namespace_id, client_id);
-        return true;
-      }
+    if (release > fence_sync_release_ &&
+        order_data_->ValidateReleaseOrderNumber(this, wait_order_num, release,
+                                                callback)) {
+      // Add the callback which will be called upon release.
+      release_callback_queue_.push(ReleaseCallback(release, callback));
+      return true;
     }
   }
-
-  // Already released, run the callback now.
-  callback.Run();
-  return true;
+  // Already released, do not run the callback.
+  return false;
 }
 
 void SyncPointClientState::ReleaseFenceSync(uint64_t release) {
@@ -232,10 +217,11 @@
   std::vector<base::Closure> callback_list;
   {
     base::AutoLock auto_lock(fence_sync_lock_);
+
     DLOG_IF(ERROR, release <= fence_sync_release_)
         << "Client submitted fence releases out of order.";
-
     fence_sync_release_ = release;
+
     while (!release_callback_queue_.empty() &&
            release_callback_queue_.top().release_count <= release) {
       callback_list.push_back(release_callback_queue_.top().callback_closure);
@@ -286,166 +272,149 @@
   }
 }
 
-void SyncPointClientState::SetOnWaitCallback(const OnWaitCallback& callback) {
-  on_wait_callback_ = callback;
+SyncPointClient::SyncPointClient(SyncPointManager* sync_point_manager,
+                                 scoped_refptr<SyncPointOrderData> order_data,
+                                 CommandBufferNamespace namespace_id,
+                                 CommandBufferId command_buffer_id)
+    : sync_point_manager_(sync_point_manager),
+      order_data_(order_data),
+      client_state_(new SyncPointClientState(order_data)),
+      namespace_id_(namespace_id),
+      command_buffer_id_(command_buffer_id) {
+  sync_point_manager_->RegisterSyncPointClient(client_state_, namespace_id,
+                                               command_buffer_id);
 }
 
 SyncPointClient::~SyncPointClient() {
-  if (namespace_id_ != gpu::CommandBufferNamespace::INVALID) {
-    // Release all fences on destruction.
-    client_state_->ReleaseFenceSync(UINT64_MAX);
-
-    sync_point_manager_->DestroySyncPointClient(namespace_id_, client_id_);
-  }
+  // Release all fences on destruction.
+  client_state_->ReleaseFenceSync(UINT64_MAX);
+  sync_point_manager_->DeregisterSyncPointClient(namespace_id_,
+                                                 command_buffer_id_);
 }
 
-bool SyncPointClient::Wait(SyncPointClientState* release_state,
-                           uint64_t release_count,
-                           const base::Closure& wait_complete_callback) {
+bool SyncPointClient::Wait(const SyncToken& sync_token,
+                           const base::Closure& callback) {
   // Validate that this Wait call is between BeginProcessingOrderNumber() and
   // FinishProcessingOrderNumber(), or else we may deadlock.
-  DCHECK(client_state_->order_data()->IsProcessingOrderNumber());
-
-  const uint32_t wait_order_number =
-      client_state_->order_data()->current_order_num();
-
-  // If waiting on self or wait was invalid, call the callback and return false.
-  if (client_state_ == release_state ||
-      !release_state->WaitForRelease(namespace_id_, client_id_,
-                                     wait_order_number, release_count,
-                                     wait_complete_callback)) {
-    wait_complete_callback.Run();
+  DCHECK(order_data_->IsProcessingOrderNumber());
+  if (sync_token.namespace_id() == namespace_id_ &&
+      sync_token.command_buffer_id() == command_buffer_id_) {
     return false;
   }
-  return true;
+  uint32_t wait_order_number = order_data_->current_order_num();
+  return sync_point_manager_->Wait(sync_token, wait_order_number, callback);
 }
 
 bool SyncPointClient::WaitNonThreadSafe(
-    SyncPointClientState* release_state,
-    uint64_t release_count,
-    scoped_refptr<base::SingleThreadTaskRunner> runner,
-    const base::Closure& wait_complete_callback) {
-  return Wait(release_state, release_count,
-              base::Bind(&RunOnThread, runner, wait_complete_callback));
-}
-
-bool SyncPointClient::WaitOutOfOrder(
-    SyncPointClientState* release_state,
-    uint64_t release_count,
-    const base::Closure& wait_complete_callback) {
-  // Validate that this Wait call is not between BeginProcessingOrderNumber()
-  // and FinishProcessingOrderNumber(), or else we may deadlock.
-  DCHECK(!client_state_ ||
-         !client_state_->order_data()->IsProcessingOrderNumber());
-
-  // No order number associated with the current execution context, using
-  // UINT32_MAX will just assume the release is in the SyncPointClientState's
-  // order numbers to be executed.
-  if (!release_state->WaitForRelease(namespace_id_, client_id_, UINT32_MAX,
-                                     release_count, wait_complete_callback)) {
-    wait_complete_callback.Run();
-    return false;
-  }
-  return true;
-}
-
-bool SyncPointClient::WaitOutOfOrderNonThreadSafe(
-    SyncPointClientState* release_state,
-    uint64_t release_count,
-    scoped_refptr<base::SingleThreadTaskRunner> runner,
-    const base::Closure& wait_complete_callback) {
-  return WaitOutOfOrder(
-      release_state, release_count,
-      base::Bind(&RunOnThread, runner, wait_complete_callback));
+    const SyncToken& sync_token,
+    scoped_refptr<base::SingleThreadTaskRunner> task_runner,
+    const base::Closure& callback) {
+  return Wait(sync_token, base::Bind(&RunOnThread, task_runner, callback));
 }
 
 void SyncPointClient::ReleaseFenceSync(uint64_t release) {
   // Validate that this Release call is between BeginProcessingOrderNumber() and
   // FinishProcessingOrderNumber(), or else we may deadlock.
-  DCHECK(client_state_->order_data()->IsProcessingOrderNumber());
+  DCHECK(order_data_->IsProcessingOrderNumber());
   client_state_->ReleaseFenceSync(release);
 }
 
-void SyncPointClient::SetOnWaitCallback(const OnWaitCallback& callback) {
-  client_state_->SetOnWaitCallback(callback);
-}
-
-SyncPointClient::SyncPointClient()
-    : sync_point_manager_(nullptr),
-      namespace_id_(gpu::CommandBufferNamespace::INVALID),
-      client_id_() {}
-
-SyncPointClient::SyncPointClient(SyncPointManager* sync_point_manager,
-                                 scoped_refptr<SyncPointOrderData> order_data,
-                                 CommandBufferNamespace namespace_id,
-                                 CommandBufferId client_id)
-    : sync_point_manager_(sync_point_manager),
-      client_state_(new SyncPointClientState(order_data)),
-      namespace_id_(namespace_id),
-      client_id_(client_id) {}
-
-SyncPointManager::SyncPointManager(bool allow_threaded_wait) {
+SyncPointManager::SyncPointManager() {
   global_order_num_.GetNext();
 }
 
 SyncPointManager::~SyncPointManager() {
-  for (const ClientMap& client_map : client_maps_) {
-    DCHECK(client_map.empty());
-  }
+  for (const ClientStateMap& client_state_map : client_state_maps_)
+    DCHECK(client_state_map.empty());
 }
 
-std::unique_ptr<SyncPointClient> SyncPointManager::CreateSyncPointClient(
-    scoped_refptr<SyncPointOrderData> order_data,
+bool SyncPointManager::IsSyncTokenReleased(const SyncToken& sync_token) {
+  scoped_refptr<SyncPointClientState> release_state = GetSyncPointClientState(
+      sync_token.namespace_id(), sync_token.command_buffer_id());
+  if (release_state)
+    return release_state->IsFenceSyncReleased(sync_token.release_count());
+  return true;
+}
+
+bool SyncPointManager::Wait(const SyncToken& sync_token,
+                            uint32_t wait_order_num,
+                            const base::Closure& callback) {
+  scoped_refptr<SyncPointClientState> release_state = GetSyncPointClientState(
+      sync_token.namespace_id(), sync_token.command_buffer_id());
+  if (release_state &&
+      release_state->WaitForRelease(sync_token.release_count(), wait_order_num,
+                                    callback)) {
+    return true;
+  }
+  // Do not run callback if wait is invalid.
+  return false;
+}
+
+bool SyncPointManager::WaitNonThreadSafe(
+    const SyncToken& sync_token,
+    uint32_t wait_order_num,
+    scoped_refptr<base::SingleThreadTaskRunner> task_runner,
+    const base::Closure& callback) {
+  return Wait(sync_token, wait_order_num,
+              base::Bind(&RunOnThread, task_runner, callback));
+}
+
+bool SyncPointManager::WaitOutOfOrder(const SyncToken& trusted_sync_token,
+                                      const base::Closure& callback) {
+  // No order number associated with the current execution context, using
+  // UINT32_MAX will just assume the release is in the SyncPointClientState's
+  // order numbers to be executed.
+  return Wait(trusted_sync_token, UINT32_MAX, callback);
+}
+
+bool SyncPointManager::WaitOutOfOrderNonThreadSafe(
+    const SyncToken& trusted_sync_token,
+    scoped_refptr<base::SingleThreadTaskRunner> task_runner,
+    const base::Closure& callback) {
+  return WaitOutOfOrder(trusted_sync_token,
+                        base::Bind(&RunOnThread, task_runner, callback));
+}
+
+void SyncPointManager::RegisterSyncPointClient(
+    scoped_refptr<SyncPointClientState> client_state,
     CommandBufferNamespace namespace_id,
-    CommandBufferId client_id) {
+    CommandBufferId command_buffer_id) {
   DCHECK_GE(namespace_id, 0);
-  DCHECK_LT(static_cast<size_t>(namespace_id), arraysize(client_maps_));
-  base::AutoLock auto_lock(client_maps_lock_);
+  DCHECK_LT(static_cast<size_t>(namespace_id), arraysize(client_state_maps_));
 
-  ClientMap& client_map = client_maps_[namespace_id];
-  std::pair<ClientMap::iterator, bool> result = client_map.insert(
-      std::make_pair(client_id, new SyncPointClient(this, order_data,
-                                                    namespace_id, client_id)));
-  DCHECK(result.second);
-
-  return base::WrapUnique(result.first->second);
+  base::AutoLock auto_lock(client_state_maps_lock_);
+  DCHECK(!client_state_maps_[namespace_id].count(command_buffer_id));
+  client_state_maps_[namespace_id].insert(
+      std::make_pair(command_buffer_id, client_state));
 }
 
-std::unique_ptr<SyncPointClient>
-SyncPointManager::CreateSyncPointClientWaiter() {
-  return base::WrapUnique(new SyncPointClient);
-}
-
-scoped_refptr<SyncPointClientState> SyncPointManager::GetSyncPointClientState(
+void SyncPointManager::DeregisterSyncPointClient(
     CommandBufferNamespace namespace_id,
-    CommandBufferId client_id) {
-  if (namespace_id >= 0) {
-    DCHECK_LT(static_cast<size_t>(namespace_id), arraysize(client_maps_));
-    base::AutoLock auto_lock(client_maps_lock_);
-    ClientMap& client_map = client_maps_[namespace_id];
-    ClientMap::iterator it = client_map.find(client_id);
-    if (it != client_map.end()) {
-      return it->second->client_state();
-    }
-  }
-  return nullptr;
+    CommandBufferId command_buffer_id) {
+  DCHECK_GE(namespace_id, 0);
+  DCHECK_LT(static_cast<size_t>(namespace_id), arraysize(client_state_maps_));
+
+  base::AutoLock auto_lock(client_state_maps_lock_);
+  DCHECK(client_state_maps_[namespace_id].count(command_buffer_id));
+  client_state_maps_[namespace_id].erase(command_buffer_id);
 }
 
 uint32_t SyncPointManager::GenerateOrderNumber() {
   return global_order_num_.GetNext();
 }
 
-void SyncPointManager::DestroySyncPointClient(
+scoped_refptr<SyncPointClientState> SyncPointManager::GetSyncPointClientState(
     CommandBufferNamespace namespace_id,
-    CommandBufferId client_id) {
-  DCHECK_GE(namespace_id, 0);
-  DCHECK_LT(static_cast<size_t>(namespace_id), arraysize(client_maps_));
-
-  base::AutoLock auto_lock(client_maps_lock_);
-  ClientMap& client_map = client_maps_[namespace_id];
-  ClientMap::iterator it = client_map.find(client_id);
-  DCHECK(it != client_map.end());
-  client_map.erase(it);
+    CommandBufferId command_buffer_id) {
+  if (namespace_id >= 0) {
+    DCHECK_LT(static_cast<size_t>(namespace_id), arraysize(client_state_maps_));
+    base::AutoLock auto_lock(client_state_maps_lock_);
+    ClientStateMap& client_state_map = client_state_maps_[namespace_id];
+    auto it = client_state_map.find(command_buffer_id);
+    if (it != client_state_map.end())
+      return it->second;
+  }
+  return nullptr;
 }
 
 }  // namespace gpu
diff --git a/gpu/command_buffer/service/sync_point_manager.h b/gpu/command_buffer/service/sync_point_manager.h
index 7a3695c..a34a17c 100644
--- a/gpu/command_buffer/service/sync_point_manager.h
+++ b/gpu/command_buffer/service/sync_point_manager.h
@@ -23,6 +23,7 @@
 #include "base/threading/thread_checker.h"
 #include "gpu/command_buffer/common/command_buffer_id.h"
 #include "gpu/command_buffer/common/constants.h"
+#include "gpu/command_buffer/common/sync_token.h"
 #include "gpu/gpu_export.h"
 
 namespace base {
@@ -66,9 +67,14 @@
     return !paused_ && current_order_num_ > processed_order_num();
   }
 
+  bool ValidateReleaseOrderNumber(
+      scoped_refptr<SyncPointClientState> client_state,
+      uint32_t wait_order_num,
+      uint64_t fence_release,
+      const base::Closure& release_callback);
+
  private:
   friend class base::RefCountedThreadSafe<SyncPointOrderData>;
-  friend class SyncPointClientState;
 
   struct OrderFence {
     uint32_t order_num;
@@ -84,32 +90,26 @@
     ~OrderFence();
 
     bool operator>(const OrderFence& rhs) const {
-      return (order_num > rhs.order_num) ||
-             ((order_num == rhs.order_num) &&
-              (fence_release > rhs.fence_release));
+      return std::tie(order_num, fence_release) >
+             std::tie(rhs.order_num, rhs.fence_release);
     }
   };
   typedef std::priority_queue<OrderFence,
                               std::vector<OrderFence>,
-                              std::greater<OrderFence>> OrderFenceQueue;
+                              std::greater<OrderFence>>
+      OrderFenceQueue;
 
   SyncPointOrderData();
   ~SyncPointOrderData();
 
-  bool ValidateReleaseOrderNumber(
-      scoped_refptr<SyncPointClientState> client_state,
-      uint32_t wait_order_num,
-      uint64_t fence_release,
-      const base::Closure& release_callback);
-
   // Non thread-safe functions need to be called from a single thread.
   base::ThreadChecker processing_thread_checker_;
 
   // Current IPC order number being processed (only used on processing thread).
-  uint32_t current_order_num_;
+  uint32_t current_order_num_ = 0;
 
   // Whether or not the current order number is being processed or paused.
-  bool paused_;
+  bool paused_ = false;
 
   // This lock protects destroyed_, processed_order_num_,
   // unprocessed_order_num_, and order_fence_queue_. All order numbers (n) in
@@ -117,13 +117,13 @@
   //   processed_order_num_ < n <= unprocessed_order_num_.
   mutable base::Lock lock_;
 
-  bool destroyed_;
+  bool destroyed_ = false;
 
   // Last finished IPC order number.
-  uint32_t processed_order_num_;
+  uint32_t processed_order_num_ = 0;
 
   // Unprocessed order number expected to be processed under normal execution.
-  uint32_t unprocessed_order_num_;
+  uint32_t unprocessed_order_num_ = 0;
 
   // In situations where we are waiting on fence syncs that do not exist, we
   // validate by making sure the order number does not pass the order number
@@ -137,24 +137,30 @@
   DISALLOW_COPY_AND_ASSIGN(SyncPointOrderData);
 };
 
+// Internal state for sync point clients.
 class GPU_EXPORT SyncPointClientState
     : public base::RefCountedThreadSafe<SyncPointClientState> {
  public:
-  scoped_refptr<SyncPointOrderData> order_data() { return order_data_; }
+  explicit SyncPointClientState(scoped_refptr<SyncPointOrderData> order_data);
 
-  bool IsFenceSyncReleased(uint64_t release) {
-    return release <= fence_sync_release();
-  }
+  bool IsFenceSyncReleased(uint64_t release);
 
-  uint64_t fence_sync_release() {
-    base::AutoLock auto_lock(fence_sync_lock_);
-    return fence_sync_release_;
-  }
+  // Queues the callback to be called if the release is valid. If the release
+  // is invalid this function will return False and the callback will never
+  // be called.
+  bool WaitForRelease(uint64_t release,
+                      uint32_t wait_order_num,
+                      const base::Closure& callback);
+
+  // Releases a fence sync and all fence syncs below.
+  void ReleaseFenceSync(uint64_t release);
+
+  // Does not release the fence sync, but releases callbacks waiting on that
+  // fence sync.
+  void EnsureWaitReleased(uint64_t release, const base::Closure& callback);
 
  private:
   friend class base::RefCountedThreadSafe<SyncPointClientState>;
-  friend class SyncPointClient;
-  friend class SyncPointOrderData;
 
   struct ReleaseCallback {
     uint64_t release_count;
@@ -173,29 +179,8 @@
                               std::greater<ReleaseCallback>>
       ReleaseCallbackQueue;
 
-  SyncPointClientState(scoped_refptr<SyncPointOrderData> order_data);
   ~SyncPointClientState();
 
-  // Queues the callback to be called if the release is valid. If the release
-  // is invalid this function will return False and the callback will never
-  // be called.
-  bool WaitForRelease(CommandBufferNamespace namespace_id,
-                      CommandBufferId client_id,
-                      uint32_t wait_order_num,
-                      uint64_t release,
-                      const base::Closure& callback);
-
-  // Releases a fence sync and all fence syncs below.
-  void ReleaseFenceSync(uint64_t release);
-
-  // Does not release the fence sync, but releases callbacks waiting on that
-  // fence sync.
-  void EnsureWaitReleased(uint64_t release, const base::Closure& callback);
-
-  typedef base::Callback<void(CommandBufferNamespace, CommandBufferId)>
-      OnWaitCallback;
-  void SetOnWaitCallback(const OnWaitCallback& callback);
-
   // Global order data where releases will originate from.
   scoped_refptr<SyncPointOrderData> order_data_;
 
@@ -203,87 +188,47 @@
   base::Lock fence_sync_lock_;
 
   // Current fence sync release that has been signaled.
-  uint64_t fence_sync_release_;
+  uint64_t fence_sync_release_ = 0;
 
   // In well defined fence sync operations, fence syncs are released in order
   // so simply having a priority queue for callbacks is enough.
   ReleaseCallbackQueue release_callback_queue_;
 
-  // Called when a release callback is queued.
-  OnWaitCallback on_wait_callback_;
-
   DISALLOW_COPY_AND_ASSIGN(SyncPointClientState);
 };
 
 class GPU_EXPORT SyncPointClient {
  public:
-  ~SyncPointClient();
-
-  scoped_refptr<SyncPointClientState> client_state() { return client_state_; }
-
-  // Wait for a release count to be reached on a SyncPointClientState. If this
-  // function returns false, that means the wait was invalid. Otherwise if it
-  // returns True it means the release was valid. In the case where the release
-  // is valid but has happened already, it will still return true. In all cases
-  // wait_complete_callback will be called eventually. The callback function
-  // may be called on another thread so it should be thread-safe. For
-  // convenience, another non-threadsafe version is defined below where you
-  // can supply a task runner.
-  bool Wait(SyncPointClientState* release_state,
-            uint64_t release_count,
-            const base::Closure& wait_complete_callback);
-
-  bool WaitNonThreadSafe(SyncPointClientState* release_state,
-                         uint64_t release_count,
-                         scoped_refptr<base::SingleThreadTaskRunner> runner,
-                         const base::Closure& wait_complete_callback);
-
-  // Unordered waits are waits which do not occur within the global order number
-  // processing order (IE. Not between the corresponding
-  // SyncPointOrderData::BeginProcessingOrderNumber() and
-  // SyncPointOrderData::FinishProcessingOrderNumber() calls). Because fence
-  // sync releases must occur within a corresponding order number, these waits
-  // cannot deadlock because they can never depend on any fence sync releases.
-  // This is useful for IPC messages that may be processed out of order with
-  // respect to regular command buffer processing.
-  bool WaitOutOfOrder(SyncPointClientState* release_state,
-                      uint64_t release_count,
-                      const base::Closure& wait_complete_callback);
-
-  bool WaitOutOfOrderNonThreadSafe(
-      SyncPointClientState* release_state,
-      uint64_t release_count,
-      scoped_refptr<base::SingleThreadTaskRunner> runner,
-      const base::Closure& wait_complete_callback);
-
-  void ReleaseFenceSync(uint64_t release);
-
-  // This callback is called with the namespace and id of the waiting client
-  // when a release callback is queued. The callback is called on the thread
-  // where the Wait... happens and synchronization is the responsibility of the
-  // caller.
-  typedef base::Callback<void(CommandBufferNamespace, CommandBufferId)>
-      OnWaitCallback;
-  void SetOnWaitCallback(const OnWaitCallback& callback);
-
- private:
-  friend class SyncPointManager;
-
-  SyncPointClient();
   SyncPointClient(SyncPointManager* sync_point_manager,
                   scoped_refptr<SyncPointOrderData> order_data,
                   CommandBufferNamespace namespace_id,
-                  CommandBufferId client_id);
+                  CommandBufferId command_buffer_id);
+  ~SyncPointClient();
 
+  // This behaves similarly to SyncPointManager::Wait but uses the order data
+  // to guarantee no deadlocks with other clients.
+  bool Wait(const SyncToken& sync_token, const base::Closure& callback);
+
+  // Like Wait but runs the callback on the given task runner's thread.
+  bool WaitNonThreadSafe(
+      const SyncToken& sync_token,
+      scoped_refptr<base::SingleThreadTaskRunner> task_runner,
+      const base::Closure& callback);
+
+  // Release fence sync and run queued callbacks.
+  void ReleaseFenceSync(uint64_t release);
+
+ private:
   // Sync point manager is guaranteed to exist in the lifetime of the client.
-  SyncPointManager* sync_point_manager_;
+  SyncPointManager* const sync_point_manager_;
 
-  // Keep the state that is sharable across multiple threads.
+  scoped_refptr<SyncPointOrderData> order_data_;
+
   scoped_refptr<SyncPointClientState> client_state_;
 
   // Unique namespace/client id pair for this sync point client.
   const CommandBufferNamespace namespace_id_;
-  const CommandBufferId client_id_;
+  const CommandBufferId command_buffer_id_;
 
   DISALLOW_COPY_AND_ASSIGN(SyncPointClient);
 };
@@ -292,42 +237,67 @@
 // synchronization.
 class GPU_EXPORT SyncPointManager {
  public:
-  explicit SyncPointManager(bool allow_threaded_wait);
+  SyncPointManager();
   ~SyncPointManager();
 
-  // Creates/Destroy a sync point client which message processors should hold.
-  std::unique_ptr<SyncPointClient> CreateSyncPointClient(
-      scoped_refptr<SyncPointOrderData> order_data,
-      CommandBufferNamespace namespace_id,
-      CommandBufferId client_id);
+  // Returns true if the sync token has been released or if the command buffer
+  // does not exist.
+  bool IsSyncTokenReleased(const SyncToken& sync_token);
 
-  // Creates a sync point client which cannot process order numbers but can only
-  // Wait out of order.
-  std::unique_ptr<SyncPointClient> CreateSyncPointClientWaiter();
+  // If the wait is valid (the sync token has not been released yet and its
+  // command buffer is registered), the callback is queued to run when the
+  // sync point is released. If the wait is invalid, the callback is NOT run.
+  // The callback runs on the thread on which the sync point is released.
+  // Clients should use SyncPointClient::Wait because that uses order data to
+  // prevent deadlocks.
+  bool Wait(const SyncToken& sync_token,
+            uint32_t wait_order_num,
+            const base::Closure& callback);
 
-  // Finds the state of an already created sync point client.
-  scoped_refptr<SyncPointClientState> GetSyncPointClientState(
-      CommandBufferNamespace namespace_id,
-      CommandBufferId client_id);
+  // Like Wait but runs the callback on the given task runner's thread.
+  bool WaitNonThreadSafe(
+      const SyncToken& sync_token,
+      uint32_t wait_order_num,
+      scoped_refptr<base::SingleThreadTaskRunner> task_runner,
+      const base::Closure& callback);
+
+  // WaitOutOfOrder allows waiting for a sync token indefinitely, so it
+  // should be used with trusted sync tokens only.
+  bool WaitOutOfOrder(const SyncToken& trusted_sync_token,
+                      const base::Closure& callback);
+
+  // Like WaitOutOfOrder but runs the callback on the given task runner's
+  // thread.
+  bool WaitOutOfOrderNonThreadSafe(
+      const SyncToken& trusted_sync_token,
+      scoped_refptr<base::SingleThreadTaskRunner> task_runner,
+      const base::Closure& callback);
+
+  // Used by SyncPointClient.
+  void RegisterSyncPointClient(scoped_refptr<SyncPointClientState> client_state,
+                               CommandBufferNamespace namespace_id,
+                               CommandBufferId command_buffer_id);
+
+  void DeregisterSyncPointClient(CommandBufferNamespace namespace_id,
+                                 CommandBufferId command_buffer_id);
+
+  // Used by SyncPointOrderData.
+  uint32_t GenerateOrderNumber();
 
  private:
-  friend class SyncPointClient;
-  friend class SyncPointOrderData;
+  using ClientStateMap = std::unordered_map<CommandBufferId,
+                                            scoped_refptr<SyncPointClientState>,
+                                            CommandBufferId::Hasher>;
 
-  using ClientMap = std::unordered_map<CommandBufferId,
-                                       SyncPointClient*,
-                                       CommandBufferId::Hasher>;
-
-  uint32_t GenerateOrderNumber();
-  void DestroySyncPointClient(CommandBufferNamespace namespace_id,
-                              CommandBufferId client_id);
+  scoped_refptr<SyncPointClientState> GetSyncPointClientState(
+      CommandBufferNamespace namespace_id,
+      CommandBufferId command_buffer_id);
 
   // Order number is global for all clients.
   base::AtomicSequenceNumber global_order_num_;
 
   // Client map holds a map of clients id to client for each namespace.
-  base::Lock client_maps_lock_;
-  ClientMap client_maps_[NUM_COMMAND_BUFFER_NAMESPACES];
+  base::Lock client_state_maps_lock_;
+  ClientStateMap client_state_maps_[NUM_COMMAND_BUFFER_NAMESPACES];
 
   DISALLOW_COPY_AND_ASSIGN(SyncPointManager);
 };
diff --git a/gpu/command_buffer/service/sync_point_manager_unittest.cc b/gpu/command_buffer/service/sync_point_manager_unittest.cc
index a9f6c56..96b80623 100644
--- a/gpu/command_buffer/service/sync_point_manager_unittest.cc
+++ b/gpu/command_buffer/service/sync_point_manager_unittest.cc
@@ -8,6 +8,7 @@
 #include <queue>
 
 #include "base/bind.h"
+#include "base/memory/ptr_util.h"
 #include "gpu/command_buffer/service/sync_point_manager.h"
 #include "testing/gtest/include/gtest/gtest.h"
 
@@ -15,29 +16,13 @@
 
 class SyncPointManagerTest : public testing::Test {
  public:
-  SyncPointManagerTest() {}
-
+  SyncPointManagerTest() : sync_point_manager_(new SyncPointManager) {}
   ~SyncPointManagerTest() override {}
 
  protected:
-  void SetUp() override {
-    sync_point_manager_.reset(new SyncPointManager(false));
-  }
-
-  void TearDown() override { sync_point_manager_.reset(); }
-
   // Simple static function which can be used to test callbacks.
   static void SetIntegerFunction(int* test, int value) { *test = value; }
 
-  // Simple static function used for testing OnWaitCallback.
-  static void OnWait(CommandBufferNamespace* namespace_id_ptr,
-                     CommandBufferId* client_id_ptr,
-                     CommandBufferNamespace namespace_id,
-                     CommandBufferId client_id) {
-    *namespace_id_ptr = namespace_id;
-    *client_id_ptr = client_id;
-  }
-
   std::unique_ptr<SyncPointManager> sync_point_manager_;
 };
 
@@ -50,9 +35,10 @@
                   CommandBufferNamespace namespace_id,
                   CommandBufferId command_buffer_id)
       : order_data(SyncPointOrderData::Create()),
-        client(sync_point_manager->CreateSyncPointClient(order_data,
-                                                         namespace_id,
-                                                         command_buffer_id)) {}
+        client(base::MakeUnique<SyncPointClient>(sync_point_manager,
+                                                 order_data,
+                                                 namespace_id,
+                                                 command_buffer_id)) {}
 
   ~SyncPointStream() {
     order_data->Destroy();
@@ -83,7 +69,7 @@
   EXPECT_EQ(0u, order_data->processed_order_num());
   EXPECT_EQ(0u, order_data->unprocessed_order_num());
 
-  const uint32_t order_num =
+  uint32_t order_num =
       order_data->GenerateUnprocessedOrderNumber(sync_point_manager_.get());
   EXPECT_EQ(1u, order_num);
 
@@ -110,200 +96,195 @@
   EXPECT_FALSE(order_data->IsProcessingOrderNumber());
 }
 
-TEST_F(SyncPointManagerTest, SyncPointClientRegistration) {
-  const CommandBufferNamespace kNamespaceId =
-      gpu::CommandBufferNamespace::GPU_IO;
-  const CommandBufferId kBufferId = CommandBufferId::FromUnsafeValue(0x123);
-
-  scoped_refptr<SyncPointClientState> empty_state =
-      sync_point_manager_->GetSyncPointClientState(kNamespaceId, kBufferId);
-  EXPECT_FALSE(empty_state);
-
-  scoped_refptr<SyncPointOrderData> order_data = SyncPointOrderData::Create();
-
-  std::unique_ptr<SyncPointClient> client =
-      sync_point_manager_->CreateSyncPointClient(order_data, kNamespaceId,
-                                                 kBufferId);
-
-  EXPECT_EQ(order_data, client->client_state()->order_data());
-  EXPECT_EQ(
-      client->client_state(),
-      sync_point_manager_->GetSyncPointClientState(kNamespaceId, kBufferId));
-}
-
 TEST_F(SyncPointManagerTest, BasicFenceSyncRelease) {
-  const CommandBufferNamespace kNamespaceId =
-      gpu::CommandBufferNamespace::GPU_IO;
-  const CommandBufferId kBufferId = CommandBufferId::FromUnsafeValue(0x123);
+  CommandBufferNamespace kNamespaceId = gpu::CommandBufferNamespace::GPU_IO;
+  CommandBufferId kBufferId = CommandBufferId::FromUnsafeValue(0x123);
 
-  scoped_refptr<SyncPointOrderData> order_data = SyncPointOrderData::Create();
-  std::unique_ptr<SyncPointClient> client =
-      sync_point_manager_->CreateSyncPointClient(order_data, kNamespaceId,
-                                                 kBufferId);
-  scoped_refptr<SyncPointClientState> client_state = client->client_state();
+  uint64_t release_count = 1;
+  SyncToken sync_token(kNamespaceId, 0, kBufferId, release_count);
 
-  EXPECT_EQ(0u, client_state->fence_sync_release());
-  EXPECT_FALSE(client_state->IsFenceSyncReleased(1));
+  // A sync token whose command buffer has not been registered yet is treated
+  // as already released, so it is not possible to wait on it.
+  EXPECT_TRUE(sync_point_manager_->IsSyncTokenReleased(sync_token));
 
-  const uint32_t order_num =
-      order_data->GenerateUnprocessedOrderNumber(sync_point_manager_.get());
-  order_data->BeginProcessingOrderNumber(order_num);
-  client->ReleaseFenceSync(1);
-  order_data->FinishProcessingOrderNumber(order_num);
+  SyncPointStream stream(sync_point_manager_.get(), kNamespaceId, kBufferId);
 
-  EXPECT_EQ(1u, client_state->fence_sync_release());
-  EXPECT_TRUE(client_state->IsFenceSyncReleased(1));
+  stream.AllocateOrderNum(sync_point_manager_.get());
+
+  EXPECT_FALSE(sync_point_manager_->IsSyncTokenReleased(sync_token));
+
+  stream.order_data->BeginProcessingOrderNumber(1);
+  stream.client->ReleaseFenceSync(release_count);
+  stream.order_data->FinishProcessingOrderNumber(1);
+
+  EXPECT_TRUE(sync_point_manager_->IsSyncTokenReleased(sync_token));
 }
 
 TEST_F(SyncPointManagerTest, MultipleClientsPerOrderData) {
-  const CommandBufferNamespace kNamespaceId =
-      gpu::CommandBufferNamespace::GPU_IO;
-  const CommandBufferId kBufferId1 = CommandBufferId::FromUnsafeValue(0x123);
-  const CommandBufferId kBufferId2 = CommandBufferId::FromUnsafeValue(0x234);
+  CommandBufferNamespace kNamespaceId = gpu::CommandBufferNamespace::GPU_IO;
+  CommandBufferId kCmdBufferId1 = CommandBufferId::FromUnsafeValue(0x123);
+  CommandBufferId kCmdBufferId2 = CommandBufferId::FromUnsafeValue(0x234);
 
-  scoped_refptr<SyncPointOrderData> order_data = SyncPointOrderData::Create();
-  std::unique_ptr<SyncPointClient> client1 =
-      sync_point_manager_->CreateSyncPointClient(order_data, kNamespaceId,
-                                                 kBufferId1);
-  std::unique_ptr<SyncPointClient> client2 =
-      sync_point_manager_->CreateSyncPointClient(order_data, kNamespaceId,
-                                                 kBufferId2);
+  SyncPointStream stream1(sync_point_manager_.get(), kNamespaceId,
+                          kCmdBufferId1);
+  SyncPointStream stream2(sync_point_manager_.get(), kNamespaceId,
+                          kCmdBufferId2);
 
-  scoped_refptr<SyncPointClientState> client_state1 = client1->client_state();
-  scoped_refptr<SyncPointClientState> client_state2 = client2->client_state();
+  uint64_t release_count = 1;
+  SyncToken sync_token1(kNamespaceId, 0, kCmdBufferId1, release_count);
+  stream1.AllocateOrderNum(sync_point_manager_.get());
 
-  const uint32_t order_num =
-      order_data->GenerateUnprocessedOrderNumber(sync_point_manager_.get());
-  order_data->BeginProcessingOrderNumber(order_num);
-  client1->ReleaseFenceSync(1);
-  order_data->FinishProcessingOrderNumber(order_num);
+  SyncToken sync_token2(kNamespaceId, 0, kCmdBufferId2, release_count);
+  stream2.AllocateOrderNum(sync_point_manager_.get());
 
-  EXPECT_TRUE(client_state1->IsFenceSyncReleased(1));
-  EXPECT_FALSE(client_state2->IsFenceSyncReleased(1));
+  EXPECT_FALSE(sync_point_manager_->IsSyncTokenReleased(sync_token1));
+  EXPECT_FALSE(sync_point_manager_->IsSyncTokenReleased(sync_token2));
+
+  stream1.order_data->BeginProcessingOrderNumber(1);
+  stream1.client->ReleaseFenceSync(release_count);
+  stream1.order_data->FinishProcessingOrderNumber(1);
+
+  EXPECT_TRUE(sync_point_manager_->IsSyncTokenReleased(sync_token1));
+  EXPECT_FALSE(sync_point_manager_->IsSyncTokenReleased(sync_token2));
 }
 
 TEST_F(SyncPointManagerTest, BasicFenceSyncWaitRelease) {
-  const CommandBufferNamespace kNamespaceId =
-      gpu::CommandBufferNamespace::GPU_IO;
-  const CommandBufferId kBufferId1 = CommandBufferId::FromUnsafeValue(0x123);
-  const CommandBufferId kBufferId2 = CommandBufferId::FromUnsafeValue(0x234);
+  CommandBufferNamespace kNamespaceId = gpu::CommandBufferNamespace::GPU_IO;
+  CommandBufferId kReleaseCmdBufferId = CommandBufferId::FromUnsafeValue(0x123);
+  CommandBufferId kWaitCmdBufferId = CommandBufferId::FromUnsafeValue(0x234);
 
   SyncPointStream release_stream(sync_point_manager_.get(), kNamespaceId,
-                                 kBufferId1);
+                                 kReleaseCmdBufferId);
   SyncPointStream wait_stream(sync_point_manager_.get(), kNamespaceId,
-                              kBufferId2);
+                              kWaitCmdBufferId);
 
   release_stream.AllocateOrderNum(sync_point_manager_.get());
   wait_stream.AllocateOrderNum(sync_point_manager_.get());
 
+  uint64_t release_count = 1;
+  SyncToken sync_token(kNamespaceId, 0, kReleaseCmdBufferId, release_count);
+
   wait_stream.BeginProcessing();
   int test_num = 10;
-  const bool valid_wait = wait_stream.client->Wait(
-      release_stream.client->client_state().get(), 1,
+  bool valid_wait = wait_stream.client->Wait(
+      sync_token,
       base::Bind(&SyncPointManagerTest::SetIntegerFunction, &test_num, 123));
-  ASSERT_TRUE(valid_wait);
+  EXPECT_TRUE(valid_wait);
   EXPECT_EQ(10, test_num);
+  EXPECT_FALSE(sync_point_manager_->IsSyncTokenReleased(sync_token));
 
   release_stream.BeginProcessing();
-  release_stream.client->ReleaseFenceSync(1);
+  release_stream.client->ReleaseFenceSync(release_count);
   EXPECT_EQ(123, test_num);
+  EXPECT_TRUE(sync_point_manager_->IsSyncTokenReleased(sync_token));
 }
 
 TEST_F(SyncPointManagerTest, WaitOnSelfFails) {
-  const CommandBufferNamespace kNamespaceId =
-      gpu::CommandBufferNamespace::GPU_IO;
-  const CommandBufferId kBufferId1 = CommandBufferId::FromUnsafeValue(0x123);
-  const CommandBufferId kBufferId2 = CommandBufferId::FromUnsafeValue(0x234);
+  CommandBufferNamespace kNamespaceId = gpu::CommandBufferNamespace::GPU_IO;
+  CommandBufferId kReleaseCmdBufferId = CommandBufferId::FromUnsafeValue(0x123);
+  CommandBufferId kWaitCmdBufferId = CommandBufferId::FromUnsafeValue(0x234);
 
   SyncPointStream release_stream(sync_point_manager_.get(), kNamespaceId,
-                                 kBufferId1);
+                                 kReleaseCmdBufferId);
   SyncPointStream wait_stream(sync_point_manager_.get(), kNamespaceId,
-                              kBufferId2);
+                              kWaitCmdBufferId);
 
-  // Generate wait order number first.
   release_stream.AllocateOrderNum(sync_point_manager_.get());
   wait_stream.AllocateOrderNum(sync_point_manager_.get());
 
+  uint64_t release_count = 1;
+  SyncToken sync_token(kNamespaceId, 0, kWaitCmdBufferId, release_count);
+
   wait_stream.BeginProcessing();
   int test_num = 10;
-  const bool valid_wait = wait_stream.client->Wait(
-      wait_stream.client->client_state().get(), 1,
+  bool valid_wait = wait_stream.client->Wait(
+      sync_token,
       base::Bind(&SyncPointManagerTest::SetIntegerFunction, &test_num, 123));
   EXPECT_FALSE(valid_wait);
-  EXPECT_EQ(123, test_num);
+  EXPECT_EQ(10, test_num);
+  EXPECT_FALSE(sync_point_manager_->IsSyncTokenReleased(sync_token));
 }
 
 TEST_F(SyncPointManagerTest, OutOfOrderRelease) {
-  const CommandBufferNamespace kNamespaceId =
-      gpu::CommandBufferNamespace::GPU_IO;
-  const CommandBufferId kBufferId1 = CommandBufferId::FromUnsafeValue(0x123);
-  const CommandBufferId kBufferId2 = CommandBufferId::FromUnsafeValue(0x234);
+  CommandBufferNamespace kNamespaceId = gpu::CommandBufferNamespace::GPU_IO;
+  CommandBufferId kReleaseCmdBufferId = CommandBufferId::FromUnsafeValue(0x123);
+  CommandBufferId kWaitCmdBufferId = CommandBufferId::FromUnsafeValue(0x234);
 
   SyncPointStream release_stream(sync_point_manager_.get(), kNamespaceId,
-                                 kBufferId1);
+                                 kReleaseCmdBufferId);
   SyncPointStream wait_stream(sync_point_manager_.get(), kNamespaceId,
-                              kBufferId2);
+                              kWaitCmdBufferId);
 
   // Generate wait order number first.
   wait_stream.AllocateOrderNum(sync_point_manager_.get());
   release_stream.AllocateOrderNum(sync_point_manager_.get());
 
+  uint64_t release_count = 1;
+  SyncToken sync_token(kNamespaceId, 0, kReleaseCmdBufferId, release_count);
+
   wait_stream.BeginProcessing();
   int test_num = 10;
-  const bool valid_wait = wait_stream.client->Wait(
-      release_stream.client->client_state().get(), 1,
+  bool valid_wait = wait_stream.client->Wait(
+      sync_token,
       base::Bind(&SyncPointManagerTest::SetIntegerFunction, &test_num, 123));
   EXPECT_FALSE(valid_wait);
-  EXPECT_EQ(123, test_num);
+  EXPECT_EQ(10, test_num);
+  EXPECT_FALSE(sync_point_manager_->IsSyncTokenReleased(sync_token));
 }
 
 TEST_F(SyncPointManagerTest, HigherOrderNumberRelease) {
-  const CommandBufferNamespace kNamespaceId =
-      gpu::CommandBufferNamespace::GPU_IO;
-  const CommandBufferId kBufferId1 = CommandBufferId::FromUnsafeValue(0x123);
-  const CommandBufferId kBufferId2 = CommandBufferId::FromUnsafeValue(0x234);
+  CommandBufferNamespace kNamespaceId = gpu::CommandBufferNamespace::GPU_IO;
+  CommandBufferId kReleaseCmdBufferId = CommandBufferId::FromUnsafeValue(0x123);
+  CommandBufferId kWaitCmdBufferId = CommandBufferId::FromUnsafeValue(0x234);
 
   SyncPointStream release_stream(sync_point_manager_.get(), kNamespaceId,
-                                 kBufferId1);
+                                 kReleaseCmdBufferId);
   SyncPointStream wait_stream(sync_point_manager_.get(), kNamespaceId,
-                              kBufferId2);
+                              kWaitCmdBufferId);
 
   // Generate wait order number first.
   wait_stream.AllocateOrderNum(sync_point_manager_.get());
   release_stream.AllocateOrderNum(sync_point_manager_.get());
 
+  uint64_t release_count = 1;
+  SyncToken sync_token(kNamespaceId, 0, kReleaseCmdBufferId, release_count);
+
   // Order number was higher but it was actually released.
   release_stream.BeginProcessing();
-  release_stream.client->ReleaseFenceSync(1);
+  release_stream.client->ReleaseFenceSync(release_count);
   release_stream.EndProcessing();
 
+  // Release stream has already released so there's no need to wait.
   wait_stream.BeginProcessing();
   int test_num = 10;
-  const bool valid_wait = wait_stream.client->Wait(
-      release_stream.client->client_state().get(), 1,
+  bool valid_wait = wait_stream.client->Wait(
+      sync_token,
       base::Bind(&SyncPointManagerTest::SetIntegerFunction, &test_num, 123));
-  EXPECT_TRUE(valid_wait);
-  EXPECT_EQ(123, test_num);
+  EXPECT_FALSE(valid_wait);
+  EXPECT_EQ(10, test_num);
+  EXPECT_TRUE(sync_point_manager_->IsSyncTokenReleased(sync_token));
 }
 
 TEST_F(SyncPointManagerTest, DestroyedClientRelease) {
-  const CommandBufferNamespace kNamespaceId =
-      gpu::CommandBufferNamespace::GPU_IO;
-  const CommandBufferId kBufferId1 = CommandBufferId::FromUnsafeValue(0x123);
-  const CommandBufferId kBufferId2 = CommandBufferId::FromUnsafeValue(0x234);
+  CommandBufferNamespace kNamespaceId = gpu::CommandBufferNamespace::GPU_IO;
+  CommandBufferId kReleaseCmdBufferId = CommandBufferId::FromUnsafeValue(0x123);
+  CommandBufferId kWaitCmdBufferId = CommandBufferId::FromUnsafeValue(0x234);
 
   SyncPointStream release_stream(sync_point_manager_.get(), kNamespaceId,
-                                 kBufferId1);
+                                 kReleaseCmdBufferId);
   SyncPointStream wait_stream(sync_point_manager_.get(), kNamespaceId,
-                              kBufferId2);
+                              kWaitCmdBufferId);
 
   release_stream.AllocateOrderNum(sync_point_manager_.get());
   wait_stream.AllocateOrderNum(sync_point_manager_.get());
 
+  uint64_t release_count = 1;
+  SyncToken sync_token(kNamespaceId, 0, kReleaseCmdBufferId, release_count);
+
   wait_stream.BeginProcessing();
+
   int test_num = 10;
-  const bool valid_wait = wait_stream.client->Wait(
-      release_stream.client->client_state().get(), 1,
+  bool valid_wait = wait_stream.client->Wait(
+      sync_token,
       base::Bind(&SyncPointManagerTest::SetIntegerFunction, &test_num, 123));
   EXPECT_TRUE(valid_wait);
   EXPECT_EQ(10, test_num);
@@ -311,18 +292,18 @@
   // Destroying the client should release the wait.
   release_stream.client.reset();
   EXPECT_EQ(123, test_num);
+  EXPECT_TRUE(sync_point_manager_->IsSyncTokenReleased(sync_token));
 }
 
 TEST_F(SyncPointManagerTest, NonExistentRelease) {
-  const CommandBufferNamespace kNamespaceId =
-      gpu::CommandBufferNamespace::GPU_IO;
-  const CommandBufferId kBufferId1 = CommandBufferId::FromUnsafeValue(0x123);
-  const CommandBufferId kBufferId2 = CommandBufferId::FromUnsafeValue(0x234);
+  CommandBufferNamespace kNamespaceId = gpu::CommandBufferNamespace::GPU_IO;
+  CommandBufferId kReleaseCmdBufferId = CommandBufferId::FromUnsafeValue(0x123);
+  CommandBufferId kWaitCmdBufferId = CommandBufferId::FromUnsafeValue(0x234);
 
   SyncPointStream release_stream(sync_point_manager_.get(), kNamespaceId,
-                                 kBufferId1);
+                                 kReleaseCmdBufferId);
   SyncPointStream wait_stream(sync_point_manager_.get(), kNamespaceId,
-                              kBufferId2);
+                              kWaitCmdBufferId);
 
   // Assign release stream order [1] and wait stream order [2].
   // This test simply tests that a wait stream of order [2] waiting on
@@ -331,31 +312,35 @@
   release_stream.AllocateOrderNum(sync_point_manager_.get());
   wait_stream.AllocateOrderNum(sync_point_manager_.get());
 
+  uint64_t release_count = 1;
+  SyncToken sync_token(kNamespaceId, 0, kReleaseCmdBufferId, release_count);
+
   wait_stream.BeginProcessing();
   int test_num = 10;
-  const bool valid_wait = wait_stream.client->Wait(
-      release_stream.client->client_state().get(), 1,
+  bool valid_wait = wait_stream.client->Wait(
+      sync_token,
       base::Bind(&SyncPointManagerTest::SetIntegerFunction, &test_num, 123));
   EXPECT_TRUE(valid_wait);
   EXPECT_EQ(10, test_num);
+  EXPECT_FALSE(sync_point_manager_->IsSyncTokenReleased(sync_token));
 
   // No release but finishing the order number should automatically release.
   release_stream.BeginProcessing();
   EXPECT_EQ(10, test_num);
   release_stream.EndProcessing();
   EXPECT_EQ(123, test_num);
+  EXPECT_FALSE(sync_point_manager_->IsSyncTokenReleased(sync_token));
 }
 
 TEST_F(SyncPointManagerTest, NonExistentRelease2) {
-  const CommandBufferNamespace kNamespaceId =
-      gpu::CommandBufferNamespace::GPU_IO;
-  const CommandBufferId kBufferId1 = CommandBufferId::FromUnsafeValue(0x123);
-  const CommandBufferId kBufferId2 = CommandBufferId::FromUnsafeValue(0x234);
+  CommandBufferNamespace kNamespaceId = gpu::CommandBufferNamespace::GPU_IO;
+  CommandBufferId kReleaseCmdBufferId = CommandBufferId::FromUnsafeValue(0x123);
+  CommandBufferId kWaitCmdBufferId = CommandBufferId::FromUnsafeValue(0x234);
 
   SyncPointStream release_stream(sync_point_manager_.get(), kNamespaceId,
-                                 kBufferId1);
+                                 kReleaseCmdBufferId);
   SyncPointStream wait_stream(sync_point_manager_.get(), kNamespaceId,
-                              kBufferId2);
+                              kWaitCmdBufferId);
 
   // Assign Release stream order [1] and assign Wait stream orders [2, 3].
   // This test is similar to the NonExistentRelease case except
@@ -367,47 +352,52 @@
   wait_stream.AllocateOrderNum(sync_point_manager_.get());
   wait_stream.AllocateOrderNum(sync_point_manager_.get());
 
+  uint64_t release_count = 1;
+  SyncToken sync_token(kNamespaceId, 0, kReleaseCmdBufferId, release_count);
+
+  EXPECT_FALSE(sync_point_manager_->IsSyncTokenReleased(sync_token));
   // Have wait with order [3] to wait on release.
   wait_stream.BeginProcessing();
-  ASSERT_EQ(2u, wait_stream.order_data->current_order_num());
+  EXPECT_EQ(2u, wait_stream.order_data->current_order_num());
   wait_stream.EndProcessing();
   wait_stream.BeginProcessing();
-  ASSERT_EQ(3u, wait_stream.order_data->current_order_num());
+  EXPECT_EQ(3u, wait_stream.order_data->current_order_num());
   int test_num = 10;
-  const bool valid_wait = wait_stream.client->Wait(
-      release_stream.client->client_state().get(), 1,
+  bool valid_wait = wait_stream.client->Wait(
+      sync_token,
       base::Bind(&SyncPointManagerTest::SetIntegerFunction, &test_num, 123));
   EXPECT_TRUE(valid_wait);
   EXPECT_EQ(10, test_num);
+  EXPECT_FALSE(sync_point_manager_->IsSyncTokenReleased(sync_token));
 
   // Even though release stream order [1] did not have a release, it
   // should have changed test_num although the fence sync is still not released.
   release_stream.BeginProcessing();
-  ASSERT_EQ(1u, release_stream.order_data->current_order_num());
+  EXPECT_EQ(1u, release_stream.order_data->current_order_num());
   release_stream.EndProcessing();
-  EXPECT_FALSE(release_stream.client->client_state()->IsFenceSyncReleased(1));
   EXPECT_EQ(123, test_num);
+  EXPECT_FALSE(sync_point_manager_->IsSyncTokenReleased(sync_token));
 
   // Ensure that the wait callback does not get triggered again when it is
   // actually released.
   test_num = 1;
   release_stream.AllocateOrderNum(sync_point_manager_.get());
   release_stream.BeginProcessing();
-  release_stream.client->ReleaseFenceSync(1);
+  release_stream.client->ReleaseFenceSync(release_count);
   release_stream.EndProcessing();
   EXPECT_EQ(1, test_num);
+  EXPECT_TRUE(sync_point_manager_->IsSyncTokenReleased(sync_token));
 }
 
 TEST_F(SyncPointManagerTest, NonExistentOrderNumRelease) {
-  const CommandBufferNamespace kNamespaceId =
-      gpu::CommandBufferNamespace::GPU_IO;
-  const CommandBufferId kBufferId1 = CommandBufferId::FromUnsafeValue(0x123);
-  const CommandBufferId kBufferId2 = CommandBufferId::FromUnsafeValue(0x234);
+  CommandBufferNamespace kNamespaceId = gpu::CommandBufferNamespace::GPU_IO;
+  CommandBufferId kReleaseCmdBufferId = CommandBufferId::FromUnsafeValue(0x123);
+  CommandBufferId kWaitCmdBufferId = CommandBufferId::FromUnsafeValue(0x234);
 
   SyncPointStream release_stream(sync_point_manager_.get(), kNamespaceId,
-                                 kBufferId1);
+                                 kReleaseCmdBufferId);
   SyncPointStream wait_stream(sync_point_manager_.get(), kNamespaceId,
-                              kBufferId2);
+                              kWaitCmdBufferId);
 
   // Assign Release stream orders [1, 4] and assign Wait stream orders [2, 3].
   // Here we are testing that wait order [3] will wait on a fence sync
@@ -420,15 +410,18 @@
   wait_stream.AllocateOrderNum(sync_point_manager_.get());
   release_stream.AllocateOrderNum(sync_point_manager_.get());
 
+  uint64_t release_count = 1;
+  SyncToken sync_token(kNamespaceId, 0, kReleaseCmdBufferId, release_count);
+
   // Have wait with order [3] to wait on release order [1] or [2].
   wait_stream.BeginProcessing();
-  ASSERT_EQ(2u, wait_stream.order_data->current_order_num());
+  EXPECT_EQ(2u, wait_stream.order_data->current_order_num());
   wait_stream.EndProcessing();
   wait_stream.BeginProcessing();
-  ASSERT_EQ(3u, wait_stream.order_data->current_order_num());
+  EXPECT_EQ(3u, wait_stream.order_data->current_order_num());
   int test_num = 10;
-  const bool valid_wait = wait_stream.client->Wait(
-      release_stream.client->client_state().get(), 1,
+  bool valid_wait = wait_stream.client->Wait(
+      sync_token,
       base::Bind(&SyncPointManagerTest::SetIntegerFunction, &test_num, 123));
   EXPECT_TRUE(valid_wait);
   EXPECT_EQ(10, test_num);
@@ -436,74 +429,24 @@
   // Release stream should know it should release fence sync by order [3],
   // so going through order [1] should not release it yet.
   release_stream.BeginProcessing();
-  ASSERT_EQ(1u, release_stream.order_data->current_order_num());
+  EXPECT_EQ(1u, release_stream.order_data->current_order_num());
   release_stream.EndProcessing();
-  EXPECT_FALSE(release_stream.client->client_state()->IsFenceSyncReleased(1));
+  EXPECT_FALSE(sync_point_manager_->IsSyncTokenReleased(sync_token));
   EXPECT_EQ(10, test_num);
 
   // Beginning order [4] should immediately trigger the wait although the fence
   // sync is still not released yet.
   release_stream.BeginProcessing();
-  ASSERT_EQ(4u, release_stream.order_data->current_order_num());
-  EXPECT_FALSE(release_stream.client->client_state()->IsFenceSyncReleased(1));
+  EXPECT_EQ(4u, release_stream.order_data->current_order_num());
   EXPECT_EQ(123, test_num);
+  EXPECT_FALSE(sync_point_manager_->IsSyncTokenReleased(sync_token));
 
   // Ensure that the wait callback does not get triggered again when it is
   // actually released.
   test_num = 1;
   release_stream.client->ReleaseFenceSync(1);
   EXPECT_EQ(1, test_num);
-}
-
-TEST_F(SyncPointManagerTest, OnWaitCallbackTest) {
-  const CommandBufferNamespace kNamespaceId =
-      gpu::CommandBufferNamespace::GPU_IO;
-  const CommandBufferId kBufferId1 = CommandBufferId::FromUnsafeValue(0x123);
-  const CommandBufferId kBufferId2 = CommandBufferId::FromUnsafeValue(0x234);
-
-  SyncPointStream release_stream(sync_point_manager_.get(), kNamespaceId,
-                                 kBufferId1);
-  SyncPointStream wait_stream(sync_point_manager_.get(), kNamespaceId,
-                              kBufferId2);
-
-  CommandBufferNamespace namespace_id = CommandBufferNamespace::INVALID;
-  CommandBufferId client_id;
-  release_stream.client->SetOnWaitCallback(
-      base::Bind(&SyncPointManagerTest::OnWait, &namespace_id, &client_id));
-
-  release_stream.AllocateOrderNum(sync_point_manager_.get());
-  wait_stream.AllocateOrderNum(sync_point_manager_.get());
-  release_stream.AllocateOrderNum(sync_point_manager_.get());
-
-  wait_stream.BeginProcessing();
-  int test_num = 10;
-  bool valid_wait = wait_stream.client->Wait(
-      release_stream.client->client_state().get(), 1,
-      base::Bind(&SyncPointManagerTest::SetIntegerFunction, &test_num, 123));
-  EXPECT_TRUE(valid_wait);
-  EXPECT_EQ(10, test_num);
-  EXPECT_EQ(kNamespaceId, namespace_id);
-  EXPECT_EQ(kBufferId2, client_id);
-
-  release_stream.BeginProcessing();
-  release_stream.client->ReleaseFenceSync(1);
-  EXPECT_EQ(123, test_num);
-
-  wait_stream.EndProcessing();
-
-  namespace_id = CommandBufferNamespace::INVALID;
-  client_id = CommandBufferId();
-  test_num = 10;
-  valid_wait = wait_stream.client->WaitOutOfOrder(
-      release_stream.client->client_state().get(), 2,
-      base::Bind(&SyncPointManagerTest::SetIntegerFunction, &test_num, 123));
-  EXPECT_TRUE(valid_wait);
-  EXPECT_EQ(10, test_num);
-  EXPECT_EQ(kNamespaceId, namespace_id);
-  EXPECT_EQ(kBufferId2, client_id);
-
-  release_stream.client->ReleaseFenceSync(2);
-  EXPECT_EQ(123, test_num);
+  EXPECT_TRUE(sync_point_manager_->IsSyncTokenReleased(sync_token));
 }
 
 }  // namespace gpu
diff --git a/gpu/command_buffer/tests/fuzzer_main.cc b/gpu/command_buffer/tests/fuzzer_main.cc
index 4da1557..2fb7e324 100644
--- a/gpu/command_buffer/tests/fuzzer_main.cc
+++ b/gpu/command_buffer/tests/fuzzer_main.cc
@@ -88,7 +88,7 @@
  public:
   CommandBufferSetup()
       : atexit_manager_(),
-        sync_point_manager_(new SyncPointManager(false)),
+        sync_point_manager_(new SyncPointManager()),
         sync_point_order_data_(SyncPointOrderData::Create()),
         mailbox_manager_(new gles2::MailboxManagerImpl),
         share_group_(new gl::GLShareGroup),
@@ -120,9 +120,9 @@
     gl::GLSurfaceTestSupport::InitializeOneOffWithMockBindings();
 #endif
 
-    sync_point_client_ = sync_point_manager_->CreateSyncPointClient(
-        sync_point_order_data_, CommandBufferNamespace::IN_PROCESS,
-        command_buffer_id_);
+    sync_point_client_ = base::MakeUnique<SyncPointClient>(
+        sync_point_manager_.get(), sync_point_order_data_,
+        CommandBufferNamespace::IN_PROCESS, command_buffer_id_);
 
     translator_cache_ = new gles2::ShaderTranslatorCache(gpu_preferences_);
     completeness_cache_ = new gles2::FramebufferCompletenessCache;
@@ -151,8 +151,8 @@
     decoder_->set_engine(executor_.get());
     decoder_->SetFenceSyncReleaseCallback(base::Bind(
         &CommandBufferSetup::OnFenceSyncRelease, base::Unretained(this)));
-    decoder_->SetWaitFenceSyncCallback(base::Bind(
-        &CommandBufferSetup::OnWaitFenceSync, base::Unretained(this)));
+    decoder_->SetWaitSyncTokenCallback(base::Bind(
+        &CommandBufferSetup::OnWaitSyncToken, base::Unretained(this)));
     decoder_->GetLogger()->set_log_synthesized_gl_errors(false);
 
     gles2::ContextCreationAttribHelper attrib_helper;
@@ -241,21 +241,12 @@
     sync_point_client_->ReleaseFenceSync(release);
   }
 
-  bool OnWaitFenceSync(CommandBufferNamespace namespace_id,
-                       CommandBufferId command_buffer_id,
-                       uint64_t release) {
-    CHECK(sync_point_client_);
-    scoped_refptr<gpu::SyncPointClientState> release_state =
-        sync_point_manager_->GetSyncPointClientState(namespace_id,
-                                                     command_buffer_id);
-    if (!release_state)
-      return true;
-
-    if (release_state->IsFenceSyncReleased(release))
-      return true;
-
+  bool OnWaitSyncToken(const SyncToken& sync_token) {
+    CHECK(sync_point_manager_);
+    if (sync_point_manager_->IsSyncTokenReleased(sync_token))
+      return false;
     executor_->SetScheduled(false);
-    return false;
+    return true;
   }
 
   void CreateTransferBuffer(size_t size, int32_t id) {
diff --git a/gpu/command_buffer/tests/gl_fence_sync_unittest.cc b/gpu/command_buffer/tests/gl_fence_sync_unittest.cc
index 65ff1b1..a9b68d3 100644
--- a/gpu/command_buffer/tests/gl_fence_sync_unittest.cc
+++ b/gpu/command_buffer/tests/gl_fence_sync_unittest.cc
@@ -23,7 +23,7 @@
 class GLFenceSyncTest : public testing::Test {
  protected:
   void SetUp() override {
-    sync_point_manager_.reset(new SyncPointManager(false));
+    sync_point_manager_.reset(new SyncPointManager());
 
     GLManager::Options options;
     options.sync_point_manager = sync_point_manager_.get();
@@ -53,20 +53,11 @@
   ASSERT_TRUE(GL_NO_ERROR == glGetError());
 
   // Make sure it is actually released.
-  scoped_refptr<SyncPointClientState> gl1_client_state =
-      sync_point_manager_->GetSyncPointClientState(gl1_.GetNamespaceID(),
-                                                   gl1_.GetCommandBufferID());
-  EXPECT_TRUE(gl1_client_state->IsFenceSyncReleased(fence_sync));
+  EXPECT_TRUE(sync_point_manager_->IsSyncTokenReleased(sync_token));
 
   gl2_.MakeCurrent();
   glWaitSyncTokenCHROMIUM(sync_token.GetConstData());
   glFinish();
-
-  // gl2 should not have released anything.
-  scoped_refptr<SyncPointClientState> gl2_client_state =
-      sync_point_manager_->GetSyncPointClientState(gl2_.GetNamespaceID(),
-                                                   gl2_.GetCommandBufferID());
-  EXPECT_EQ(0u, gl2_client_state->fence_sync_release());
 }
 
 static void TestCallback(int* storage, int assign) {
diff --git a/gpu/command_buffer/tests/gl_manager.cc b/gpu/command_buffer/tests/gl_manager.cc
index 73463f8..06b238d 100644
--- a/gpu/command_buffer/tests/gl_manager.cc
+++ b/gpu/command_buffer/tests/gl_manager.cc
@@ -185,30 +185,11 @@
 scoped_refptr<gl::GLSurface>* GLManager::base_surface_;
 scoped_refptr<gl::GLContext>* GLManager::base_context_;
 
-GLManager::Options::Options()
-    : size(4, 4),
-      sync_point_manager(NULL),
-      share_group_manager(NULL),
-      share_mailbox_manager(NULL),
-      virtual_manager(NULL),
-      bind_generates_resource(false),
-      lose_context_when_out_of_memory(false),
-      context_lost_allowed(false),
-      context_type(gles2::CONTEXT_TYPE_OPENGLES2),
-      force_shader_name_hashing(false),
-      multisampled(false),
-      backbuffer_alpha(true),
-      image_factory(nullptr),
-      preserve_backbuffer(false) {}
+GLManager::Options::Options() = default;
 
 GLManager::GLManager()
-    : sync_point_manager_(nullptr),
-      context_lost_allowed_(false),
-      pause_commands_(false),
-      paused_order_num_(0),
-      command_buffer_id_(
-          CommandBufferId::FromUnsafeValue(g_next_command_buffer_id++)),
-      next_fence_sync_release_(1) {
+    : command_buffer_id_(
+          CommandBufferId::FromUnsafeValue(g_next_command_buffer_id++)) {
   SetupBaseContext();
 }
 
@@ -369,13 +350,14 @@
   if (options.sync_point_manager) {
     sync_point_manager_ = options.sync_point_manager;
     sync_point_order_data_ = SyncPointOrderData::Create();
-    sync_point_client_ = sync_point_manager_->CreateSyncPointClient(
-        sync_point_order_data_, GetNamespaceID(), GetCommandBufferID());
+    sync_point_client_ = base::MakeUnique<SyncPointClient>(
+        sync_point_manager_, sync_point_order_data_, GetNamespaceID(),
+        GetCommandBufferID());
 
     decoder_->SetFenceSyncReleaseCallback(
         base::Bind(&GLManager::OnFenceSyncRelease, base::Unretained(this)));
-    decoder_->SetWaitFenceSyncCallback(
-        base::Bind(&GLManager::OnWaitFenceSync, base::Unretained(this)));
+    decoder_->SetWaitSyncTokenCallback(
+        base::Bind(&GLManager::OnWaitSyncToken, base::Unretained(this)));
   } else {
     sync_point_manager_ = nullptr;
     sync_point_order_data_ = nullptr;
@@ -428,25 +410,16 @@
 
 void GLManager::OnFenceSyncRelease(uint64_t release) {
   DCHECK(sync_point_client_);
-  DCHECK(!sync_point_client_->client_state()->IsFenceSyncReleased(release));
   command_buffer_->SetReleaseCount(release);
   sync_point_client_->ReleaseFenceSync(release);
 }
 
-bool GLManager::OnWaitFenceSync(gpu::CommandBufferNamespace namespace_id,
-                                gpu::CommandBufferId command_buffer_id,
-                                uint64_t release) {
-  DCHECK(sync_point_client_);
-  scoped_refptr<gpu::SyncPointClientState> release_state =
-      sync_point_manager_->GetSyncPointClientState(namespace_id,
-                                                   command_buffer_id);
-  if (!release_state)
-    return true;
-
+bool GLManager::OnWaitSyncToken(const SyncToken& sync_token) {
+  DCHECK(sync_point_manager_);
   // GLManager does not support being multithreaded at this point, so the fence
   // sync must be released by the time wait is called.
-  DCHECK(release_state->IsFenceSyncReleased(release));
-  return true;
+  DCHECK(sync_point_manager_->IsSyncTokenReleased(sync_token));
+  return false;
 }
 
 void GLManager::MakeCurrent() {
@@ -636,19 +609,16 @@
 void GLManager::SignalSyncToken(const gpu::SyncToken& sync_token,
                                 const base::Closure& callback) {
   if (sync_point_manager_) {
-    scoped_refptr<gpu::SyncPointClientState> release_state =
-        sync_point_manager_->GetSyncPointClientState(
-            sync_token.namespace_id(), sync_token.command_buffer_id());
-
-    if (release_state) {
-      sync_point_client_->WaitOutOfOrder(release_state.get(),
-                                         sync_token.release_count(), callback);
-      return;
-    }
+    DCHECK(!paused_order_num_);
+    uint32_t order_num = sync_point_order_data_->GenerateUnprocessedOrderNumber(
+        sync_point_manager_);
+    sync_point_order_data_->BeginProcessingOrderNumber(order_num);
+    if (!sync_point_client_->Wait(sync_token, callback))
+      callback.Run();
+    sync_point_order_data_->FinishProcessingOrderNumber(order_num);
+  } else {
+    callback.Run();
   }
-
-  // Something went wrong, just run the callback now.
-  callback.Run();
 }
 
 bool GLManager::CanWaitUnverifiedSyncToken(const gpu::SyncToken* sync_token) {
diff --git a/gpu/command_buffer/tests/gl_manager.h b/gpu/command_buffer/tests/gl_manager.h
index b4ecb3e..b8877d61 100644
--- a/gpu/command_buffer/tests/gl_manager.h
+++ b/gpu/command_buffer/tests/gl_manager.h
@@ -54,32 +54,32 @@
   struct Options {
     Options();
     // The size of the backbuffer.
-    gfx::Size size;
+    gfx::Size size = gfx::Size(4, 4);
     // If not null will have a corresponding sync point manager.
-    SyncPointManager* sync_point_manager;
+    SyncPointManager* sync_point_manager = nullptr;
     // If not null will share resources with this context.
-    GLManager* share_group_manager;
+    GLManager* share_group_manager = nullptr;
     // If not null will share a mailbox manager with this context.
-    GLManager* share_mailbox_manager;
+    GLManager* share_mailbox_manager = nullptr;
     // If not null will create a virtual manager based on this context.
-    GLManager* virtual_manager;
+    GLManager* virtual_manager = nullptr;
     // Whether or not glBindXXX generates a resource.
-    bool bind_generates_resource;
+    bool bind_generates_resource = false;
     // Whether or not the context is auto-lost when GL_OUT_OF_MEMORY occurs.
-    bool lose_context_when_out_of_memory;
+    bool lose_context_when_out_of_memory = false;
     // Whether or not it's ok to lose the context.
-    bool context_lost_allowed;
-    gles2::ContextType context_type;
+    bool context_lost_allowed = false;
+    gles2::ContextType context_type = gles2::CONTEXT_TYPE_OPENGLES2;
     // Force shader name hashing for all context types.
-    bool force_shader_name_hashing;
+    bool force_shader_name_hashing = false;
     // Whether the buffer is multisampled.
-    bool multisampled;
+    bool multisampled = false;
     // Whether the backbuffer has an alpha channel.
-    bool backbuffer_alpha;
+    bool backbuffer_alpha = true;
     // The ImageFactory to use to generate images for the backbuffer.
-    gpu::ImageFactory* image_factory;
+    gpu::ImageFactory* image_factory = nullptr;
     // Whether to preserve the backbuffer after a call to SwapBuffers().
-    bool preserve_backbuffer;
+    bool preserve_backbuffer = false;
   };
   GLManager();
   ~GLManager() override;
@@ -153,13 +153,11 @@
   bool GetBufferChanged(int32_t transfer_buffer_id);
   void SetupBaseContext();
   void OnFenceSyncRelease(uint64_t release);
-  bool OnWaitFenceSync(gpu::CommandBufferNamespace namespace_id,
-                       gpu::CommandBufferId command_buffer_id,
-                       uint64_t release);
+  bool OnWaitSyncToken(const SyncToken& sync_token);
 
   gpu::GpuPreferences gpu_preferences_;
 
-  SyncPointManager* sync_point_manager_;  // Non-owning.
+  SyncPointManager* sync_point_manager_ = nullptr;  // Non-owning.
 
   scoped_refptr<SyncPointOrderData> sync_point_order_data_;
   std::unique_ptr<SyncPointClient> sync_point_client_;
@@ -173,12 +171,12 @@
   std::unique_ptr<gles2::GLES2CmdHelper> gles2_helper_;
   std::unique_ptr<TransferBuffer> transfer_buffer_;
   std::unique_ptr<gles2::GLES2Implementation> gles2_implementation_;
-  bool context_lost_allowed_;
-  bool pause_commands_;
-  uint32_t paused_order_num_;
+  bool context_lost_allowed_ = false;
+  bool pause_commands_ = false;
+  uint32_t paused_order_num_ = 0;
 
   const CommandBufferId command_buffer_id_;
-  uint64_t next_fence_sync_release_;
+  uint64_t next_fence_sync_release_ = 1;
 
   bool use_iosurface_memory_buffers_ = false;
 
diff --git a/gpu/ipc/in_process_command_buffer.cc b/gpu/ipc/in_process_command_buffer.cc
index 4ae663a..ec3464e 100644
--- a/gpu/ipc/in_process_command_buffer.cc
+++ b/gpu/ipc/in_process_command_buffer.cc
@@ -77,8 +77,7 @@
 class GpuInProcessThreadHolder : public base::Thread {
  public:
   GpuInProcessThreadHolder()
-      : base::Thread("GpuThread"),
-        sync_point_manager_(new SyncPointManager(false)) {
+      : base::Thread("GpuThread"), sync_point_manager_(new SyncPointManager()) {
     Start();
   }
 
@@ -349,8 +348,9 @@
   }
 
   sync_point_order_data_ = SyncPointOrderData::Create();
-  sync_point_client_ = service_->sync_point_manager()->CreateSyncPointClient(
-      sync_point_order_data_, GetNamespaceID(), GetCommandBufferID());
+  sync_point_client_ = base::MakeUnique<SyncPointClient>(
+      service_->sync_point_manager(), sync_point_order_data_, GetNamespaceID(),
+      GetCommandBufferID());
 
   if (service_->UseVirtualizedGLContexts() ||
       decoder_->GetContextGroup()
@@ -417,8 +417,8 @@
   decoder_->SetFenceSyncReleaseCallback(
       base::Bind(&InProcessCommandBuffer::FenceSyncReleaseOnGpuThread,
                  base::Unretained(this)));
-  decoder_->SetWaitFenceSyncCallback(
-      base::Bind(&InProcessCommandBuffer::WaitFenceSyncOnGpuThread,
+  decoder_->SetWaitSyncTokenCallback(
+      base::Bind(&InProcessCommandBuffer::WaitSyncTokenOnGpuThread,
                  base::Unretained(this)));
   decoder_->SetDescheduleUntilFinishedCallback(
       base::Bind(&InProcessCommandBuffer::DescheduleUntilFinishedOnGpuThread,
@@ -801,9 +801,8 @@
     }
   }
 
-  if (fence_sync) {
+  if (fence_sync)
     sync_point_client_->ReleaseFenceSync(fence_sync);
-  }
 }
 
 void InProcessCommandBuffer::DestroyImage(int32_t id) {
@@ -828,81 +827,57 @@
 }
 
 void InProcessCommandBuffer::FenceSyncReleaseOnGpuThread(uint64_t release) {
-  DCHECK(!sync_point_client_->client_state()->IsFenceSyncReleased(release));
+  SyncToken sync_token(GetNamespaceID(), GetExtraCommandBufferData(),
+                       GetCommandBufferID(), release);
+
   gles2::MailboxManager* mailbox_manager =
       decoder_->GetContextGroup()->mailbox_manager();
-  if (mailbox_manager->UsesSync()) {
-    SyncToken sync_token(GetNamespaceID(), GetExtraCommandBufferData(),
-                         GetCommandBufferID(), release);
-    mailbox_manager->PushTextureUpdates(sync_token);
-  }
+  mailbox_manager->PushTextureUpdates(sync_token);
 
   sync_point_client_->ReleaseFenceSync(release);
 }
 
-bool InProcessCommandBuffer::WaitFenceSyncOnGpuThread(
-    gpu::CommandBufferNamespace namespace_id,
-    gpu::CommandBufferId command_buffer_id,
-    uint64_t release) {
+bool InProcessCommandBuffer::WaitSyncTokenOnGpuThread(
+    const SyncToken& sync_token) {
   DCHECK(!waiting_for_sync_point_);
   gpu::SyncPointManager* sync_point_manager = service_->sync_point_manager();
   DCHECK(sync_point_manager);
 
-  scoped_refptr<gpu::SyncPointClientState> release_state =
-      sync_point_manager->GetSyncPointClientState(namespace_id,
-                                                  command_buffer_id);
-
-  if (!release_state)
-    return true;
+  gles2::MailboxManager* mailbox_manager =
+      decoder_->GetContextGroup()->mailbox_manager();
+  DCHECK(mailbox_manager);
 
   if (service_->BlockThreadOnWaitSyncToken()) {
-    if (!release_state->IsFenceSyncReleased(release)) {
-      // Use waitable event which is signalled when the release fence is
-      // released.
-      sync_point_client_->Wait(
-          release_state.get(), release,
-          base::Bind(&base::WaitableEvent::Signal,
-                     base::Unretained(&fence_sync_wait_event_)));
+    // Wait if sync point wait is valid.
+    if (sync_point_client_->Wait(
+            sync_token,
+            base::Bind(&base::WaitableEvent::Signal,
+                       base::Unretained(&fence_sync_wait_event_)))) {
       fence_sync_wait_event_.Wait();
     }
 
-    gles2::MailboxManager* mailbox_manager =
-        decoder_->GetContextGroup()->mailbox_manager();
-    SyncToken sync_token(namespace_id, 0, command_buffer_id, release);
     mailbox_manager->PullTextureUpdates(sync_token);
-    return true;
+    return false;
   }
 
-  if (release_state->IsFenceSyncReleased(release)) {
-    gles2::MailboxManager* mailbox_manager =
-        decoder_->GetContextGroup()->mailbox_manager();
-    SyncToken sync_token(namespace_id, 0, command_buffer_id, release);
+  waiting_for_sync_point_ = sync_point_client_->Wait(
+      sync_token,
+      base::Bind(&InProcessCommandBuffer::OnWaitSyncTokenCompleted,
+                 gpu_thread_weak_ptr_factory_.GetWeakPtr(), sync_token));
+  if (!waiting_for_sync_point_) {
     mailbox_manager->PullTextureUpdates(sync_token);
-    return true;
+    return false;
   }
 
-  waiting_for_sync_point_ = true;
-  sync_point_client_->Wait(
-      release_state.get(), release,
-      base::Bind(&InProcessCommandBuffer::OnWaitFenceSyncCompleted,
-                 gpu_thread_weak_ptr_factory_.GetWeakPtr(), namespace_id,
-                 command_buffer_id, release));
-
-  if (!waiting_for_sync_point_)
-    return true;
-
   executor_->SetScheduled(false);
-  return false;
+  return true;
 }
 
-void InProcessCommandBuffer::OnWaitFenceSyncCompleted(
-    CommandBufferNamespace namespace_id,
-    CommandBufferId command_buffer_id,
-    uint64_t release) {
+void InProcessCommandBuffer::OnWaitSyncTokenCompleted(
+    const SyncToken& sync_token) {
   DCHECK(waiting_for_sync_point_);
   gles2::MailboxManager* mailbox_manager =
       decoder_->GetContextGroup()->mailbox_manager();
-  SyncToken sync_token(namespace_id, 0, command_buffer_id, release);
   mailbox_manager->PullTextureUpdates(sync_token);
   waiting_for_sync_point_ = false;
   executor_->SetScheduled(true);
@@ -931,20 +906,8 @@
 void InProcessCommandBuffer::SignalSyncTokenOnGpuThread(
     const SyncToken& sync_token,
     const base::Closure& callback) {
-  gpu::SyncPointManager* sync_point_manager = service_->sync_point_manager();
-  DCHECK(sync_point_manager);
-
-  scoped_refptr<gpu::SyncPointClientState> release_state =
-      sync_point_manager->GetSyncPointClientState(
-          sync_token.namespace_id(), sync_token.command_buffer_id());
-
-  if (!release_state) {
+  if (!sync_point_client_->Wait(sync_token, WrapCallback(callback)))
     callback.Run();
-    return;
-  }
-
-  sync_point_client_->WaitOutOfOrder(
-      release_state.get(), sync_token.release_count(), WrapCallback(callback));
 }
 
 void InProcessCommandBuffer::SignalQuery(unsigned query_id,
@@ -1013,7 +976,7 @@
                                              const base::Closure& callback) {
   CheckSequencedThread();
   QueueTask(
-      true,
+      false,
       base::Bind(&InProcessCommandBuffer::SignalSyncTokenOnGpuThread,
                  base::Unretained(this), sync_token, WrapCallback(callback)));
 }
diff --git a/gpu/ipc/in_process_command_buffer.h b/gpu/ipc/in_process_command_buffer.h
index 02b1366..6bc7066 100644
--- a/gpu/ipc/in_process_command_buffer.h
+++ b/gpu/ipc/in_process_command_buffer.h
@@ -237,12 +237,8 @@
   void ProcessTasksOnGpuThread();
   void CheckSequencedThread();
   void FenceSyncReleaseOnGpuThread(uint64_t release);
-  bool WaitFenceSyncOnGpuThread(gpu::CommandBufferNamespace namespace_id,
-                                gpu::CommandBufferId command_buffer_id,
-                                uint64_t release);
-  void OnWaitFenceSyncCompleted(CommandBufferNamespace namespace_id,
-                                CommandBufferId command_buffer_id,
-                                uint64_t release);
+  bool WaitSyncTokenOnGpuThread(const SyncToken& sync_token);
+  void OnWaitSyncTokenCompleted(const SyncToken& sync_token);
   void DescheduleUntilFinishedOnGpuThread();
   void RescheduleAfterFinishedOnGpuThread();
   void SignalSyncTokenOnGpuThread(const SyncToken& sync_token,
diff --git a/gpu/ipc/service/gpu_channel_manager.cc b/gpu/ipc/service/gpu_channel_manager.cc
index 2ca665f..a6a099c6 100644
--- a/gpu/ipc/service/gpu_channel_manager.cc
+++ b/gpu/ipc/service/gpu_channel_manager.cc
@@ -64,8 +64,6 @@
       mailbox_manager_(gles2::MailboxManager::Create(gpu_preferences)),
       gpu_memory_manager_(this),
       sync_point_manager_(sync_point_manager),
-      sync_point_client_waiter_(
-          sync_point_manager->CreateSyncPointClientWaiter()),
       gpu_memory_buffer_factory_(gpu_memory_buffer_factory),
       gpu_feature_info_(gpu_feature_info),
       exiting_for_lost_context_(false),
@@ -174,21 +172,13 @@
     gfx::GpuMemoryBufferId id,
     int client_id,
     const SyncToken& sync_token) {
-  if (sync_token.HasData()) {
-    scoped_refptr<SyncPointClientState> release_state =
-        sync_point_manager()->GetSyncPointClientState(
-            sync_token.namespace_id(), sync_token.command_buffer_id());
-    if (release_state) {
-      sync_point_client_waiter_->WaitOutOfOrder(
-          release_state.get(), sync_token.release_count(),
+  if (!sync_point_manager_->WaitOutOfOrder(
+          sync_token,
           base::Bind(&GpuChannelManager::InternalDestroyGpuMemoryBuffer,
-                     base::Unretained(this), id, client_id));
-      return;
-    }
+                     base::Unretained(this), id, client_id))) {
+    // No sync token or invalid sync token, destroy immediately.
+    InternalDestroyGpuMemoryBuffer(id, client_id);
   }
-
-  // No sync token or invalid sync token, destroy immediately.
-  InternalDestroyGpuMemoryBuffer(id, client_id);
 }
 
 void GpuChannelManager::PopulateShaderCache(const std::string& program_proto) {
diff --git a/gpu/ipc/service/gpu_channel_manager.h b/gpu/ipc/service/gpu_channel_manager.h
index 5ca1c677..01301b27 100644
--- a/gpu/ipc/service/gpu_channel_manager.h
+++ b/gpu/ipc/service/gpu_channel_manager.h
@@ -39,7 +39,6 @@
 namespace gpu {
 struct GpuPreferences;
 class PreemptionFlag;
-class SyncPointClient;
 class SyncPointManager;
 struct SyncToken;
 namespace gles2 {
@@ -188,7 +187,6 @@
   GpuMemoryManager gpu_memory_manager_;
   // SyncPointManager guaranteed to outlive running MessageLoop.
   SyncPointManager* sync_point_manager_;
-  std::unique_ptr<SyncPointClient> sync_point_client_waiter_;
   std::unique_ptr<gles2::ProgramCache> program_cache_;
   scoped_refptr<gles2::ShaderTranslatorCache> shader_translator_cache_;
   scoped_refptr<gles2::FramebufferCompletenessCache>
diff --git a/gpu/ipc/service/gpu_channel_test_common.cc b/gpu/ipc/service/gpu_channel_test_common.cc
index 3d199eb..85b207ee 100644
--- a/gpu/ipc/service/gpu_channel_test_common.cc
+++ b/gpu/ipc/service/gpu_channel_test_common.cc
@@ -132,7 +132,7 @@
 GpuChannelTestCommon::GpuChannelTestCommon()
     : task_runner_(new base::TestSimpleTaskRunner),
       io_task_runner_(new base::TestSimpleTaskRunner),
-      sync_point_manager_(new SyncPointManager(false)),
+      sync_point_manager_(new SyncPointManager()),
       channel_manager_delegate_(new TestGpuChannelManagerDelegate()) {}
 
 GpuChannelTestCommon::~GpuChannelTestCommon() {
diff --git a/gpu/ipc/service/gpu_command_buffer_stub.cc b/gpu/ipc/service/gpu_command_buffer_stub.cc
index 1cbea32..ffa0a3c 100644
--- a/gpu/ipc/service/gpu_command_buffer_stub.cc
+++ b/gpu/ipc/service/gpu_command_buffer_stub.cc
@@ -276,7 +276,10 @@
       message.type() != GpuCommandBufferMsg_WaitForTokenInRange::ID &&
       message.type() != GpuCommandBufferMsg_WaitForGetOffsetInRange::ID &&
       message.type() != GpuCommandBufferMsg_RegisterTransferBuffer::ID &&
-      message.type() != GpuCommandBufferMsg_DestroyTransferBuffer::ID) {
+      message.type() != GpuCommandBufferMsg_DestroyTransferBuffer::ID &&
+      message.type() != GpuCommandBufferMsg_WaitSyncToken::ID &&
+      message.type() != GpuCommandBufferMsg_SignalSyncToken::ID &&
+      message.type() != GpuCommandBufferMsg_SignalQuery::ID) {
     if (!MakeCurrent())
       return false;
     have_context = true;
@@ -642,7 +645,8 @@
   decoder_.reset(gles2::GLES2Decoder::Create(context_group_.get()));
   executor_.reset(new CommandExecutor(command_buffer_.get(), decoder_.get(),
                                       decoder_.get()));
-  sync_point_client_ = channel_->sync_point_manager()->CreateSyncPointClient(
+  sync_point_client_ = base::MakeUnique<SyncPointClient>(
+      channel_->sync_point_manager(),
       channel_->GetSyncPointOrderData(stream_id_),
       CommandBufferNamespace::GPU_IO, command_buffer_id_);
 
@@ -783,8 +787,8 @@
                  base::Unretained(this)));
   decoder_->SetFenceSyncReleaseCallback(base::Bind(
       &GpuCommandBufferStub::OnFenceSyncRelease, base::Unretained(this)));
-  decoder_->SetWaitFenceSyncCallback(base::Bind(
-      &GpuCommandBufferStub::OnWaitFenceSync, base::Unretained(this)));
+  decoder_->SetWaitSyncTokenCallback(base::Bind(
+      &GpuCommandBufferStub::OnWaitSyncToken, base::Unretained(this)));
   decoder_->SetDescheduleUntilFinishedCallback(
       base::Bind(&GpuCommandBufferStub::OnDescheduleUntilFinished,
                  base::Unretained(this)));
@@ -1002,34 +1006,12 @@
   executor_->PutChanged();
 }
 
-void GpuCommandBufferStub::PullTextureUpdates(
-    CommandBufferNamespace namespace_id,
-    CommandBufferId command_buffer_id,
-    uint32_t release) {
-  gles2::MailboxManager* mailbox_manager =
-      context_group_->mailbox_manager();
-  if (mailbox_manager->UsesSync() && MakeCurrent()) {
-    SyncToken sync_token(namespace_id, 0, command_buffer_id, release);
-    mailbox_manager->PullTextureUpdates(sync_token);
-  }
-}
-
-void GpuCommandBufferStub::OnWaitSyncToken(const SyncToken& sync_token) {
-  OnWaitFenceSync(sync_token.namespace_id(), sync_token.command_buffer_id(),
-                  sync_token.release_count());
-}
-
 void GpuCommandBufferStub::OnSignalSyncToken(const SyncToken& sync_token,
                                              uint32_t id) {
-  scoped_refptr<SyncPointClientState> release_state =
-      channel_->sync_point_manager()->GetSyncPointClientState(
-          sync_token.namespace_id(), sync_token.command_buffer_id());
-
-  if (release_state) {
-    sync_point_client_->Wait(release_state.get(), sync_token.release_count(),
-                             base::Bind(&GpuCommandBufferStub::OnSignalAck,
-                                        this->AsWeakPtr(), id));
-  } else {
+  if (!sync_point_client_->WaitNonThreadSafe(
+          sync_token, channel_->task_runner(),
+          base::Bind(&GpuCommandBufferStub::OnSignalAck, this->AsWeakPtr(),
+                     id))) {
     OnSignalAck(id);
   }
 }
@@ -1058,18 +1040,11 @@
 }
 
 void GpuCommandBufferStub::OnFenceSyncRelease(uint64_t release) {
-  if (sync_point_client_->client_state()->IsFenceSyncReleased(release)) {
-    DLOG(ERROR) << "Fence Sync has already been released.";
-    return;
-  }
-
-  gles2::MailboxManager* mailbox_manager =
-      context_group_->mailbox_manager();
-  if (mailbox_manager->UsesSync() && MakeCurrent()) {
-    SyncToken sync_token(CommandBufferNamespace::GPU_IO, 0,
-                              command_buffer_id_, release);
+  SyncToken sync_token(CommandBufferNamespace::GPU_IO, 0, command_buffer_id_,
+                       release);
+  gles2::MailboxManager* mailbox_manager = context_group_->mailbox_manager();
+  if (mailbox_manager->UsesSync() && MakeCurrent())
     mailbox_manager->PushTextureUpdates(sync_token);
-  }
 
   command_buffer_->SetReleaseCount(release);
   sync_point_client_->ReleaseFenceSync(release);
@@ -1090,50 +1065,40 @@
   channel_->OnStreamRescheduled(stream_id_, true);
 }
 
-bool GpuCommandBufferStub::OnWaitFenceSync(
-    CommandBufferNamespace namespace_id,
-    CommandBufferId command_buffer_id,
-    uint64_t release) {
+bool GpuCommandBufferStub::OnWaitSyncToken(const SyncToken& sync_token) {
   DCHECK(!waiting_for_sync_point_);
   DCHECK(executor_->scheduled());
+  TRACE_EVENT_ASYNC_BEGIN1("gpu", "WaitSyncToken", this, "GpuCommandBufferStub",
+                           this);
 
-  scoped_refptr<SyncPointClientState> release_state =
-      channel_->sync_point_manager()->GetSyncPointClientState(
-          namespace_id, command_buffer_id);
+  waiting_for_sync_point_ = sync_point_client_->WaitNonThreadSafe(
+      sync_token, channel_->task_runner(),
+      base::Bind(&GpuCommandBufferStub::OnWaitSyncTokenCompleted, AsWeakPtr(),
+                 sync_token));
 
-  if (!release_state)
-    return true;
-
-  if (release_state->IsFenceSyncReleased(release)) {
-    PullTextureUpdates(namespace_id, command_buffer_id, release);
+  if (waiting_for_sync_point_) {
+    executor_->SetScheduled(false);
+    channel_->OnStreamRescheduled(stream_id_, false);
     return true;
   }
 
-  TRACE_EVENT_ASYNC_BEGIN1("gpu", "WaitFenceSync", this, "GpuCommandBufferStub",
-                           this);
-  waiting_for_sync_point_ = true;
-  sync_point_client_->WaitNonThreadSafe(
-      release_state.get(), release, channel_->task_runner(),
-      base::Bind(&GpuCommandBufferStub::OnWaitFenceSyncCompleted,
-                 this->AsWeakPtr(), namespace_id, command_buffer_id, release));
-
-  if (!waiting_for_sync_point_)
-    return true;
-
-  executor_->SetScheduled(false);
-  channel_->OnStreamRescheduled(stream_id_, false);
+  gles2::MailboxManager* mailbox_manager = context_group_->mailbox_manager();
+  if (mailbox_manager->UsesSync() && MakeCurrent())
+    mailbox_manager->PullTextureUpdates(sync_token);
   return false;
 }
 
-void GpuCommandBufferStub::OnWaitFenceSyncCompleted(
-    CommandBufferNamespace namespace_id,
-    CommandBufferId command_buffer_id,
-    uint64_t release) {
+void GpuCommandBufferStub::OnWaitSyncTokenCompleted(
+    const SyncToken& sync_token) {
   DCHECK(waiting_for_sync_point_);
-  TRACE_EVENT_ASYNC_END1("gpu", "WaitFenceSync", this, "GpuCommandBufferStub",
-                         this);
-  PullTextureUpdates(namespace_id, command_buffer_id, release);
+  TRACE_EVENT_ASYNC_END1("gpu", "WaitSyncTokenCompleted", this,
+                         "GpuCommandBufferStub", this);
   waiting_for_sync_point_ = false;
+
+  gles2::MailboxManager* mailbox_manager = context_group_->mailbox_manager();
+  if (mailbox_manager->UsesSync() && MakeCurrent())
+    mailbox_manager->PullTextureUpdates(sync_token);
+
   executor_->SetScheduled(true);
   channel_->OnStreamRescheduled(stream_id_, true);
 }
@@ -1181,13 +1146,8 @@
     return;
 
   image_manager->AddImage(image.get(), id);
-  if (image_release_count) {
-    DLOG_IF(ERROR,
-            image_release_count !=
-                sync_point_client_->client_state()->fence_sync_release() + 1)
-        << "Client released fences out of order.";
+  if (image_release_count)
     sync_point_client_->ReleaseFenceSync(image_release_count);
-  }
 }
 
 void GpuCommandBufferStub::OnDestroyImage(int32_t id) {
@@ -1211,14 +1171,14 @@
   GPUCommandBufferConsoleMessage console_message;
   console_message.id = id;
   console_message.message = message;
-  IPC::Message* msg = new GpuCommandBufferMsg_ConsoleMsg(
-      route_id_, console_message);
+  IPC::Message* msg =
+      new GpuCommandBufferMsg_ConsoleMsg(route_id_, console_message);
   msg->set_unblock(true);
   Send(msg);
 }
 
-void GpuCommandBufferStub::SendCachedShader(
-    const std::string& key, const std::string& shader) {
+void GpuCommandBufferStub::SendCachedShader(const std::string& key,
+                                            const std::string& shader) {
   channel_->CacheShader(key, shader);
 }
 
diff --git a/gpu/ipc/service/gpu_command_buffer_stub.h b/gpu/ipc/service/gpu_command_buffer_stub.h
index 75dba0a..3803172 100644
--- a/gpu/ipc/service/gpu_command_buffer_stub.h
+++ b/gpu/ipc/service/gpu_command_buffer_stub.h
@@ -167,21 +167,16 @@
                                 uint32_t size);
   void OnDestroyTransferBuffer(int32_t id);
   void OnGetTransferBuffer(int32_t id, IPC::Message* reply_message);
+  bool OnWaitSyncToken(const SyncToken& sync_token);
 
   void OnEnsureBackbuffer();
 
-  void OnWaitSyncToken(const SyncToken& sync_token);
   void OnSignalSyncToken(const SyncToken& sync_token, uint32_t id);
   void OnSignalAck(uint32_t id);
   void OnSignalQuery(uint32_t query, uint32_t id);
 
   void OnFenceSyncRelease(uint64_t release);
-  bool OnWaitFenceSync(CommandBufferNamespace namespace_id,
-                       CommandBufferId command_buffer_id,
-                       uint64_t release);
-  void OnWaitFenceSyncCompleted(CommandBufferNamespace namespace_id,
-                                CommandBufferId command_buffer_id,
-                                uint64_t release);
+  void OnWaitSyncTokenCompleted(const SyncToken& sync_token);
 
   void OnDescheduleUntilFinished();
   void OnRescheduleAfterFinished();
@@ -211,9 +206,6 @@
 
   bool CheckContextLost();
   void CheckCompleteWaits();
-  void PullTextureUpdates(CommandBufferNamespace namespace_id,
-                          CommandBufferId command_buffer_id,
-                          uint32_t release);
 
   // The lifetime of objects of this class is managed by a GpuChannel. The
   // GpuChannels destroy all the GpuCommandBufferStubs that they own when they
diff --git a/services/ui/gpu/gpu_service.cc b/services/ui/gpu/gpu_service.cc
index c0e224e..f5e5b1f 100644
--- a/services/ui/gpu/gpu_service.cc
+++ b/services/ui/gpu/gpu_service.cc
@@ -83,9 +83,7 @@
 
   sync_point_manager_ = sync_point_manager;
   if (!sync_point_manager_) {
-    const bool allow_threaded_wait = false;
-    owned_sync_point_manager_ =
-        base::MakeUnique<gpu::SyncPointManager>(allow_threaded_wait);
+    owned_sync_point_manager_ = base::MakeUnique<gpu::SyncPointManager>();
     sync_point_manager_ = owned_sync_point_manager_.get();
   }