Adds a GPU version of the ImageDecodeController to the compositor.
This allows us to pre-decode and upload images before raster work
starts, allowing for more parallelism in the GPU compositing path.
BUG: 577372, 601848
CQ_INCLUDE_TRYBOTS=tryserver.blink:linux_blink_rel
Review URL: https://codereview.chromium.org/1832573004
Cr-Commit-Position: refs/heads/master@{#386493}
diff --git a/cc/BUILD.gn b/cc/BUILD.gn
index a636ec1..f1f05f5 100644
--- a/cc/BUILD.gn
+++ b/cc/BUILD.gn
@@ -877,6 +877,7 @@
"test/mock_helper_unittest.cc",
"test/ordered_simple_task_runner_unittest.cc",
"test/test_web_graphics_context_3d_unittest.cc",
+ "tiles/gpu_image_decode_controller_unittest.cc",
"tiles/picture_layer_tiling_set_unittest.cc",
"tiles/picture_layer_tiling_unittest.cc",
"tiles/software_image_decode_controller_unittest.cc",
diff --git a/cc/cc_tests.gyp b/cc/cc_tests.gyp
index 7b08919..fb5d987 100644
--- a/cc/cc_tests.gyp
+++ b/cc/cc_tests.gyp
@@ -118,6 +118,7 @@
'test/mock_helper_unittest.cc',
'test/ordered_simple_task_runner_unittest.cc',
'test/test_web_graphics_context_3d_unittest.cc',
+ 'tiles/gpu_image_decode_controller_unittest.cc',
'tiles/picture_layer_tiling_set_unittest.cc',
'tiles/picture_layer_tiling_unittest.cc',
'tiles/software_image_decode_controller_unittest.cc',
diff --git a/cc/playback/draw_image.h b/cc/playback/draw_image.h
index 83b10ba3..ac7a70ec 100644
--- a/cc/playback/draw_image.h
+++ b/cc/playback/draw_image.h
@@ -28,7 +28,7 @@
const SkIRect src_rect() const { return src_rect_; }
SkFilterQuality filter_quality() const { return filter_quality_; }
bool matrix_is_decomposable() const { return matrix_is_decomposable_; }
- const SkMatrix& matrix() { return matrix_; }
+ const SkMatrix& matrix() const { return matrix_; }
DrawImage ApplyScale(float scale) const {
SkMatrix scaled_matrix = matrix_;
diff --git a/cc/raster/tile_task_runner.cc b/cc/raster/tile_task_runner.cc
index 50e8805d..e76218c 100644
--- a/cc/raster/tile_task_runner.cc
+++ b/cc/raster/tile_task_runner.cc
@@ -46,9 +46,16 @@
ImageDecodeTask::ImageDecodeTask() {
}
+ImageDecodeTask::ImageDecodeTask(scoped_refptr<ImageDecodeTask> dependency)
+ : dependency_(std::move(dependency)) {}
+
ImageDecodeTask::~ImageDecodeTask() {
}
+bool ImageDecodeTask::SupportsConcurrentExecution() const {
+ return true;
+}
+
RasterTask::RasterTask(ImageDecodeTask::Vector* dependencies) {
dependencies_.swap(*dependencies);
}
diff --git a/cc/raster/tile_task_runner.h b/cc/raster/tile_task_runner.h
index 8c3bb34..fe04575 100644
--- a/cc/raster/tile_task_runner.h
+++ b/cc/raster/tile_task_runner.h
@@ -58,9 +58,22 @@
public:
typedef std::vector<scoped_refptr<ImageDecodeTask>> Vector;
+ // Indicates whether this ImageDecodeTask can be run at the same time as
+ // other tasks in the task graph. If false, this task will be scheduled with
+ // TASK_CATEGORY_NONCONCURRENT_FOREGROUND. The base implementation always
+ // returns true.
+ virtual bool SupportsConcurrentExecution() const;
+
+ // Returns an optional task which this task depends on. May be null.
+ const scoped_refptr<ImageDecodeTask>& dependency() { return dependency_; }
+
protected:
ImageDecodeTask();
+ explicit ImageDecodeTask(scoped_refptr<ImageDecodeTask> dependency);
~ImageDecodeTask() override;
+
+ private:
+ scoped_refptr<ImageDecodeTask> dependency_;
};
class CC_EXPORT RasterTask : public TileTask {
diff --git a/cc/raster/tile_task_worker_pool.cc b/cc/raster/tile_task_worker_pool.cc
index 91e5164..f8c1c73 100644
--- a/cc/raster/tile_task_worker_pool.cc
+++ b/cc/raster/tile_task_worker_pool.cc
@@ -122,9 +122,10 @@
} else {
TRACE_EVENT0("cc",
"TileTaskWorkerPool::PlaybackToMemory::ConvertRGBA4444");
- SkImageInfo dst_info = SkImageInfo::Make(
- info.width(), info.height(), ResourceFormatToSkColorType(format),
- info.alphaType(), info.profileType());
+ SkImageInfo dst_info =
+ SkImageInfo::Make(info.width(), info.height(),
+ ResourceFormatToClosestSkColorType(format),
+ info.alphaType(), info.profileType());
bool rv = surface->readPixels(dst_info, memory, stride, 0, 0);
DCHECK(rv);
}
diff --git a/cc/resources/resource_format.cc b/cc/resources/resource_format.cc
index ec553d8..101aef4 100644
--- a/cc/resources/resource_format.cc
+++ b/cc/resources/resource_format.cc
@@ -9,21 +9,24 @@
namespace cc {
-SkColorType ResourceFormatToSkColorType(ResourceFormat format) {
+SkColorType ResourceFormatToClosestSkColorType(ResourceFormat format) {
+ // Use kN32_SkColorType if there is no corresponding SkColorType.
switch (format) {
case RGBA_4444:
return kARGB_4444_SkColorType;
case RGBA_8888:
case BGRA_8888:
return kN32_SkColorType;
- case ETC1:
case ALPHA_8:
- case LUMINANCE_8:
+ return kAlpha_8_SkColorType;
case RGB_565:
+ return kRGB_565_SkColorType;
+ case LUMINANCE_8:
+ return kGray_8_SkColorType;
+ case ETC1:
case RED_8:
case LUMINANCE_F16:
- NOTREACHED();
- break;
+ return kN32_SkColorType;
}
NOTREACHED();
return kN32_SkColorType;
diff --git a/cc/resources/resource_format.h b/cc/resources/resource_format.h
index 5266f31..4095408 100644
--- a/cc/resources/resource_format.h
+++ b/cc/resources/resource_format.h
@@ -31,7 +31,7 @@
RESOURCE_FORMAT_MAX = LUMINANCE_F16,
};
-SkColorType ResourceFormatToSkColorType(ResourceFormat format);
+SkColorType ResourceFormatToClosestSkColorType(ResourceFormat format);
CC_EXPORT int BitsPerPixel(ResourceFormat format);
CC_EXPORT GLenum GLDataType(ResourceFormat format);
diff --git a/cc/tiles/gpu_image_decode_controller.cc b/cc/tiles/gpu_image_decode_controller.cc
index da9bd207..91f0a33 100644
--- a/cc/tiles/gpu_image_decode_controller.cc
+++ b/cc/tiles/gpu_image_decode_controller.cc
@@ -4,35 +4,82 @@
#include "cc/tiles/gpu_image_decode_controller.h"
+#include "base/memory/discardable_memory_allocator.h"
+#include "base/memory/ptr_util.h"
+#include "base/numerics/safe_math.h"
+#include "base/strings/stringprintf.h"
+#include "base/thread_task_runner_handle.h"
#include "cc/debug/devtools_instrumentation.h"
+#include "cc/output/context_provider.h"
#include "cc/raster/tile_task_runner.h"
+#include "gpu/command_buffer/client/context_support.h"
+#include "gpu/command_buffer/client/gles2_interface.h"
+#include "gpu_image_decode_controller.h"
#include "skia/ext/refptr.h"
+#include "skia/ext/texture_handle.h"
+#include "third_party/skia/include/core/SkCanvas.h"
+#include "third_party/skia/include/core/SkSurface.h"
+#include "third_party/skia/include/gpu/GrContext.h"
+#include "third_party/skia/include/gpu/GrTexture.h"
+#include "ui/gfx/skia_util.h"
+#include "ui/gl/trace_util.h"
namespace cc {
+namespace {
+static const int kMaxGpuImageBytes = 1024 * 1024 * 96;
+static const int kMaxDiscardableItems = 2000;
+
+// Returns true if an image would not be drawn and should therefore be
+// skipped rather than decoded.
+bool SkipImage(const DrawImage& draw_image) {
+ if (!SkIRect::Intersects(draw_image.src_rect(), draw_image.image()->bounds()))
+ return true;
+ if (std::abs(draw_image.scale().width()) <
+ std::numeric_limits<float>::epsilon() ||
+ std::abs(draw_image.scale().height()) <
+ std::numeric_limits<float>::epsilon()) {
+ return true;
+ }
+ return false;
+}
+
+SkImage::DeferredTextureImageUsageParams ParamsFromDrawImage(
+ const DrawImage& draw_image) {
+ SkImage::DeferredTextureImageUsageParams params;
+ params.fMatrix = draw_image.matrix();
+ params.fQuality = draw_image.filter_quality();
+
+ return params;
+}
+
+} // namespace
+
+// Task which decodes an image and stores the result in discardable memory.
+// This task does not use GPU resources and can be run on any thread.
class ImageDecodeTaskImpl : public ImageDecodeTask {
public:
ImageDecodeTaskImpl(GpuImageDecodeController* controller,
- const DrawImage& image,
+ const DrawImage& draw_image,
uint64_t source_prepare_tiles_id)
: controller_(controller),
- image_(image),
- image_ref_(skia::SharePtr(image.image())),
- source_prepare_tiles_id_(source_prepare_tiles_id) {}
+ image_(draw_image),
+ image_ref_(skia::SharePtr(draw_image.image())),
+ source_prepare_tiles_id_(source_prepare_tiles_id) {
+ DCHECK(!SkipImage(draw_image));
+ }
// Overridden from Task:
void RunOnWorkerThread() override {
TRACE_EVENT2("cc", "ImageDecodeTaskImpl::RunOnWorkerThread", "mode", "gpu",
"source_prepare_tiles_id", source_prepare_tiles_id_);
- devtools_instrumentation::ScopedImageDecodeTask image_decode_task(
- image_ref_.get());
controller_->DecodeImage(image_);
}
// Overridden from TileTask:
void ScheduleOnOriginThread(TileTaskClient* client) override {}
void CompleteOnOriginThread(TileTaskClient* client) override {
- controller_->RemovePendingTaskForImage(image_);
+ controller_->DecodeTaskCompleted(image_);
}
protected:
@@ -42,61 +89,655 @@
GpuImageDecodeController* controller_;
DrawImage image_;
skia::RefPtr<const SkImage> image_ref_;
- uint64_t source_prepare_tiles_id_;
+ const uint64_t source_prepare_tiles_id_;
DISALLOW_COPY_AND_ASSIGN(ImageDecodeTaskImpl);
};
-GpuImageDecodeController::GpuImageDecodeController() {}
+// Task which creates an image from decoded data. Typically this involves
+// uploading data to the GPU, which requires this task be run on the non-
+// concurrent thread.
+class ImageUploadTaskImpl : public ImageDecodeTask {
+ public:
+ ImageUploadTaskImpl(GpuImageDecodeController* controller,
+ const DrawImage& draw_image,
+ scoped_refptr<ImageDecodeTask> decode_dependency,
+ uint64_t source_prepare_tiles_id)
+ : ImageDecodeTask(std::move(decode_dependency)),
+ controller_(controller),
+ image_(draw_image),
+ image_ref_(skia::SharePtr(draw_image.image())),
+ source_prepare_tiles_id_(source_prepare_tiles_id) {
+ DCHECK(!SkipImage(draw_image));
+ }
-GpuImageDecodeController::~GpuImageDecodeController() {}
+ // Override from Task:
+ void RunOnWorkerThread() override {
+ TRACE_EVENT2("cc", "ImageUploadTaskImpl::RunOnWorkerThread", "mode", "gpu",
+ "source_prepare_tiles_id", source_prepare_tiles_id_);
+ controller_->UploadImage(image_);
+ }
+
+ void ScheduleOnOriginThread(TileTaskClient* client) override {}
+ void CompleteOnOriginThread(TileTaskClient* client) override {
+ controller_->UploadTaskCompleted(image_);
+ }
+
+ // Override from ImageDecodeTask:
+ bool SupportsConcurrentExecution() const override { return false; }
+
+ protected:
+ ~ImageUploadTaskImpl() override {}
+
+ private:
+ GpuImageDecodeController* controller_;
+ DrawImage image_;
+ skia::RefPtr<const SkImage> image_ref_;
+ uint64_t source_prepare_tiles_id_;
+
+ DISALLOW_COPY_AND_ASSIGN(ImageUploadTaskImpl);
+};
+
+GpuImageDecodeController::DecodedImageData::DecodedImageData()
+ : ref_count(0), is_locked(false), decode_failure(false) {}
+
+GpuImageDecodeController::DecodedImageData::~DecodedImageData() = default;
+
+GpuImageDecodeController::UploadedImageData::UploadedImageData()
+ : budgeted(false), ref_count(0) {}
+
+GpuImageDecodeController::UploadedImageData::~UploadedImageData() = default;
+
+GpuImageDecodeController::ImageData::ImageData(DecodedDataMode mode,
+ size_t size)
+ : mode(mode), size(size), is_at_raster(false) {}
+
+GpuImageDecodeController::ImageData::~ImageData() = default;
+
+GpuImageDecodeController::GpuImageDecodeController(ContextProvider* context,
+ ResourceFormat decode_format)
+ : format_(decode_format),
+ context_(context),
+ context_threadsafe_proxy_(
+ skia::AdoptRef(context->GrContext()->threadSafeProxy())),
+ image_data_(ImageDataMRUCache::NO_AUTO_EVICT),
+ cached_items_limit_(kMaxDiscardableItems),
+ cached_bytes_limit_(kMaxGpuImageBytes),
+ bytes_used_(0) {}
+
+GpuImageDecodeController::~GpuImageDecodeController() {
+ // SetShouldAggressivelyFreeResources will zero our limits and free all
+ // outstanding image memory.
+ SetShouldAggressivelyFreeResources(true);
+}
bool GpuImageDecodeController::GetTaskForImageAndRef(
- const DrawImage& image,
+ const DrawImage& draw_image,
uint64_t prepare_tiles_id,
scoped_refptr<ImageDecodeTask>* task) {
- auto image_id = image.image()->uniqueID();
- base::AutoLock lock(lock_);
- if (prerolled_images_.count(image_id) != 0) {
+ if (SkipImage(draw_image)) {
*task = nullptr;
return false;
}
- scoped_refptr<ImageDecodeTask>& existing_task =
- pending_image_tasks_[image_id];
- if (!existing_task) {
- existing_task = make_scoped_refptr(
- new ImageDecodeTaskImpl(this, image, prepare_tiles_id));
+ base::AutoLock lock(lock_);
+ const auto image_id = draw_image.image()->uniqueID();
+
+ auto found = image_data_.Get(image_id);
+ if (found != image_data_.end()) {
+ ImageData* image_data = found->second.get();
+ if (image_data->is_at_raster) {
+ // Image is at-raster, just return, this usage will be at-raster as well.
+ *task = nullptr;
+ return false;
+ }
+
+ if (image_data->upload.image) {
+ // The image is already uploaded, ref and return.
+ RefImage(draw_image);
+ *task = nullptr;
+ return true;
+ }
}
+
+ // We didn't have a pre-uploaded image, so we need an upload task. Try to find
+ // an existing one.
+ scoped_refptr<ImageDecodeTask>& existing_task =
+ pending_image_upload_tasks_[image_id];
+ if (existing_task) {
+ // We had an existing upload task, ref the image and return the task.
+ RefImage(draw_image);
+ *task = existing_task;
+ return true;
+ }
+
+ // We will be creating a new upload task. If necessary, create a placeholder
+ // ImageData to hold the result.
+ std::unique_ptr<ImageData> new_data;
+ ImageData* data;
+ if (found == image_data_.end()) {
+ new_data = CreateImageData(draw_image);
+ data = new_data.get();
+ } else {
+ data = found->second.get();
+ }
+
+ // Ensure that the image we're about to decode/upload will fit in memory.
+ if (!EnsureCapacity(data->size)) {
+ // Image will not fit, do an at-raster decode.
+ *task = nullptr;
+ return false;
+ }
+
+ // If we had to create new image data, add it to our map now that we know it
+ // will fit.
+ if (new_data)
+ found = image_data_.Put(image_id, std::move(new_data));
+
+  // Ref the image and create upload and decode tasks. We will release this ref
+ // in UploadTaskCompleted.
+ RefImage(draw_image);
+ existing_task = make_scoped_refptr(new ImageUploadTaskImpl(
+ this, draw_image, GetImageDecodeTaskAndRef(draw_image, prepare_tiles_id),
+ prepare_tiles_id));
+
+ // Ref the image again - this ref is owned by the caller, and it is their
+ // responsibility to release it by calling UnrefImage.
+ RefImage(draw_image);
*task = existing_task;
- return false;
+ return true;
}
-void GpuImageDecodeController::UnrefImage(const DrawImage& image) {
- NOTREACHED();
+void GpuImageDecodeController::UnrefImage(const DrawImage& draw_image) {
+ base::AutoLock lock(lock_);
+ UnrefImageInternal(draw_image);
}
DecodedDrawImage GpuImageDecodeController::GetDecodedImageForDraw(
const DrawImage& draw_image) {
- return DecodedDrawImage(draw_image.image(), draw_image.filter_quality());
+ // We are being called during raster. The context lock must already be
+ // acquired by the caller.
+ context_->GetLock()->AssertAcquired();
+
+ if (SkipImage(draw_image))
+ return DecodedDrawImage(nullptr, draw_image.filter_quality());
+
+ TRACE_EVENT0("cc", "GpuImageDecodeController::GetDecodedImageForDraw");
+
+ base::AutoLock lock(lock_);
+ const uint32_t unique_id = draw_image.image()->uniqueID();
+ auto found = image_data_.Peek(unique_id);
+ if (found == image_data_.end()) {
+ // We didn't find the image, create a new entry.
+ auto data = CreateImageData(draw_image);
+ found = image_data_.Put(unique_id, std::move(data));
+ }
+
+ ImageData* image_data = found->second.get();
+
+ if (!image_data->upload.budgeted) {
+ // If image data is not budgeted by this point, it is at-raster.
+ image_data->is_at_raster = true;
+ }
+
+ // Ref the image and decode so that they stay alive while we are
+ // decoding/uploading.
+ RefImage(draw_image);
+ RefImageDecode(draw_image);
+
+  // We may or may not need to decode and upload the image we've found; the
+  // following functions early-out if we have already done so.
+ DecodeImageIfNecessary(draw_image, image_data);
+ UploadImageIfNecessary(draw_image, image_data);
+ // Unref the image decode, but not the image. The image ref will be released
+ // in DrawWithImageFinished.
+ UnrefImageDecode(draw_image);
+
+ SkImage* image = image_data->upload.image.get();
+ DCHECK(image || image_data->decode.decode_failure);
+
+ DecodedDrawImage decoded_draw_image(image, draw_image.filter_quality());
+ decoded_draw_image.set_at_raster_decode(image_data->is_at_raster);
+ return decoded_draw_image;
}
void GpuImageDecodeController::DrawWithImageFinished(
- const DrawImage& image,
- const DecodedDrawImage& decoded_image) {}
+ const DrawImage& draw_image,
+ const DecodedDrawImage& decoded_draw_image) {
+ // We are being called during raster. The context lock must already be
+ // acquired by the caller.
+ context_->GetLock()->AssertAcquired();
-void GpuImageDecodeController::ReduceCacheUsage() {}
+ if (SkipImage(draw_image))
+ return;
-void GpuImageDecodeController::DecodeImage(const DrawImage& image) {
- image.image()->preroll();
base::AutoLock lock(lock_);
- prerolled_images_.insert(image.image()->uniqueID());
+ UnrefImageInternal(draw_image);
+
+ // We are mid-draw and holding the context lock, ensure we clean up any
+ // textures (especially at-raster), which may have just been marked for
+ // deletion by UnrefImage.
+ DeletePendingImages();
}
-void GpuImageDecodeController::RemovePendingTaskForImage(
- const DrawImage& image) {
+void GpuImageDecodeController::ReduceCacheUsage() {
base::AutoLock lock(lock_);
- pending_image_tasks_.erase(image.image()->uniqueID());
+ EnsureCapacity(0);
+}
+
+void GpuImageDecodeController::SetShouldAggressivelyFreeResources(
+ bool aggressively_free_resources) {
+ if (aggressively_free_resources) {
+ ContextProvider::ScopedContextLock context_lock(context_);
+ base::AutoLock lock(lock_);
+ // We want to keep as little in our cache as possible. Set our memory limit
+ // to zero and EnsureCapacity to clean up memory.
+ cached_bytes_limit_ = 0;
+ EnsureCapacity(0);
+
+ // We are holding the context lock, so finish cleaning up deleted images
+ // now.
+ DeletePendingImages();
+ } else {
+ base::AutoLock lock(lock_);
+ cached_bytes_limit_ = kMaxGpuImageBytes;
+ }
+}
+
+void GpuImageDecodeController::DecodeImage(const DrawImage& draw_image) {
+ base::AutoLock lock(lock_);
+ auto found = image_data_.Peek(draw_image.image()->uniqueID());
+ DCHECK(found != image_data_.end());
+ DCHECK(!found->second->is_at_raster);
+ DecodeImageIfNecessary(draw_image, found->second.get());
+}
+
+void GpuImageDecodeController::UploadImage(const DrawImage& draw_image) {
+ ContextProvider::ScopedContextLock context_lock(context_);
+ base::AutoLock lock(lock_);
+ auto found = image_data_.Peek(draw_image.image()->uniqueID());
+ DCHECK(found != image_data_.end());
+ DCHECK(!found->second->is_at_raster);
+ UploadImageIfNecessary(draw_image, found->second.get());
+}
+
+void GpuImageDecodeController::DecodeTaskCompleted(
+ const DrawImage& draw_image) {
+ base::AutoLock lock(lock_);
+ // Decode task is complete, remove it from our list of pending tasks.
+ pending_image_decode_tasks_.erase(draw_image.image()->uniqueID());
+
+ // While the decode task is active, we keep a ref on the decoded data.
+ // Release that ref now.
+ UnrefImageDecode(draw_image);
+}
+
+void GpuImageDecodeController::UploadTaskCompleted(
+ const DrawImage& draw_image) {
+ base::AutoLock lock(lock_);
+ // Upload task is complete, remove it from our list of pending tasks.
+ pending_image_upload_tasks_.erase(draw_image.image()->uniqueID());
+
+ // While the upload task is active, we keep a ref on both the image it will be
+ // populating, as well as the decode it needs to populate it. Release these
+ // refs now.
+ UnrefImageDecode(draw_image);
+ UnrefImageInternal(draw_image);
+}
+
+// Checks if an existing image decode exists. If not, returns a task to produce
+// the requested decode.
+scoped_refptr<ImageDecodeTask>
+GpuImageDecodeController::GetImageDecodeTaskAndRef(const DrawImage& draw_image,
+ uint64_t prepare_tiles_id) {
+ lock_.AssertAcquired();
+
+ const uint32_t image_id = draw_image.image()->uniqueID();
+
+ // This ref is kept alive while an upload task may need this decode. We
+ // release this ref in UploadTaskCompleted.
+ RefImageDecode(draw_image);
+
+ auto found = image_data_.Peek(image_id);
+ if (found != image_data_.end() && found->second->decode.is_locked) {
+ // We should never be creating a decode task for an at raster image.
+ DCHECK(!found->second->is_at_raster);
+ // We should never be creating a decode for an already-uploaded image.
+ DCHECK(!found->second->upload.image);
+ return nullptr;
+ }
+
+ // We didn't have an existing locked image, create a task to lock or decode.
+ scoped_refptr<ImageDecodeTask>& existing_task =
+ pending_image_decode_tasks_[image_id];
+ if (!existing_task) {
+ // Ref image decode and create a decode task. This ref will be released in
+ // DecodeTaskCompleted.
+ RefImageDecode(draw_image);
+ existing_task = make_scoped_refptr(
+ new ImageDecodeTaskImpl(this, draw_image, prepare_tiles_id));
+ }
+ return existing_task;
+}
+
+void GpuImageDecodeController::RefImageDecode(const DrawImage& draw_image) {
+ lock_.AssertAcquired();
+ auto found = image_data_.Peek(draw_image.image()->uniqueID());
+ DCHECK(found != image_data_.end());
+ ++found->second->decode.ref_count;
+ RefCountChanged(found->second.get());
+}
+
+void GpuImageDecodeController::UnrefImageDecode(const DrawImage& draw_image) {
+ lock_.AssertAcquired();
+ auto found = image_data_.Peek(draw_image.image()->uniqueID());
+ DCHECK(found != image_data_.end());
+ DCHECK_GT(found->second->decode.ref_count, 0u);
+ --found->second->decode.ref_count;
+ RefCountChanged(found->second.get());
+}
+
+void GpuImageDecodeController::RefImage(const DrawImage& draw_image) {
+ lock_.AssertAcquired();
+ auto found = image_data_.Peek(draw_image.image()->uniqueID());
+ DCHECK(found != image_data_.end());
+ ++found->second->upload.ref_count;
+ RefCountChanged(found->second.get());
+}
+
+void GpuImageDecodeController::UnrefImageInternal(const DrawImage& draw_image) {
+ lock_.AssertAcquired();
+ auto found = image_data_.Peek(draw_image.image()->uniqueID());
+ DCHECK(found != image_data_.end());
+ DCHECK_GT(found->second->upload.ref_count, 0u);
+ --found->second->upload.ref_count;
+ RefCountChanged(found->second.get());
+}
+
+// Called any time an image or decode ref count changes. Takes care of any
+// necessary memory budget book-keeping and cleanup.
+void GpuImageDecodeController::RefCountChanged(ImageData* image_data) {
+ lock_.AssertAcquired();
+
+ bool has_any_refs =
+ image_data->upload.ref_count > 0 || image_data->decode.ref_count > 0;
+ if (image_data->is_at_raster && !has_any_refs) {
+ // We have an at-raster image which has reached zero refs. If it won't fit
+ // in our cache, delete the image to allow it to fit.
+ if (image_data->upload.image && !CanFitSize(image_data->size)) {
+ images_pending_deletion_.push_back(std::move(image_data->upload.image));
+ image_data->upload.image = nullptr;
+ }
+
+ // We now have an at-raster image which will fit in our cache. Convert it
+ // to not-at-raster.
+ image_data->is_at_raster = false;
+ if (image_data->upload.image) {
+ bytes_used_ += image_data->size;
+ image_data->upload.budgeted = true;
+ }
+ }
+
+ // If we have image refs on a non-at-raster image, it must be budgeted, as it
+ // is either uploaded or pending upload.
+ if (image_data->upload.ref_count > 0 && !image_data->upload.budgeted &&
+ !image_data->is_at_raster) {
+ // We should only be taking non-at-raster refs on images that fit in cache.
+ DCHECK(CanFitSize(image_data->size));
+
+ bytes_used_ += image_data->size;
+ image_data->upload.budgeted = true;
+ }
+
+ // If we have no image refs on an image, it should only be budgeted if it has
+ // an uploaded image. If no image exists (upload was cancelled), we should
+ // un-budget the image.
+ if (image_data->upload.ref_count == 0 && image_data->upload.budgeted &&
+ !image_data->upload.image) {
+ DCHECK_GE(bytes_used_, image_data->size);
+ bytes_used_ -= image_data->size;
+ image_data->upload.budgeted = false;
+ }
+
+ // If we have no decode refs on an image, we should unlock any locked
+ // discardable memory.
+ if (image_data->decode.ref_count == 0 && image_data->decode.is_locked) {
+ DCHECK(image_data->decode.data);
+ image_data->decode.data->Unlock();
+ image_data->decode.is_locked = false;
+ }
+}
+
+// Ensures that we can fit a new image of size |required_size| in our cache. In
+// doing so, this function will free unreferenced image data as necessary to
+// create room.
+bool GpuImageDecodeController::EnsureCapacity(size_t required_size) {
+ lock_.AssertAcquired();
+
+ if (CanFitSize(required_size) && !ExceedsPreferredCount())
+ return true;
+
+ // While we are over memory or preferred item capacity, we iterate through
+ // our set of cached image data in LRU order. For each image, we can do two
+ // things: 1) We can free the uploaded image, reducing the memory usage of
+ // the cache and 2) we can remove the entry entirely, reducing the count of
+ // elements in the cache.
+ for (auto it = image_data_.rbegin(); it != image_data_.rend();) {
+ if (it->second->decode.ref_count != 0 ||
+ it->second->upload.ref_count != 0) {
+ ++it;
+ continue;
+ }
+
+ // Current entry has no refs. Ensure it is not locked.
+ DCHECK(!it->second->decode.is_locked);
+
+ // If an image without refs is budgeted, it must have an associated image
+ // upload.
+ DCHECK(!it->second->upload.budgeted || it->second->upload.image);
+
+ // Free the uploaded image if possible.
+ if (it->second->upload.image) {
+ DCHECK(it->second->upload.budgeted);
+ DCHECK_GE(bytes_used_, it->second->size);
+ bytes_used_ -= it->second->size;
+ images_pending_deletion_.push_back(std::move(it->second->upload.image));
+ it->second->upload.image = nullptr;
+ it->second->upload.budgeted = false;
+ }
+
+ // Free the entire entry if necessary.
+ if (ExceedsPreferredCount()) {
+ it = image_data_.Erase(it);
+ } else {
+ ++it;
+ }
+
+ if (CanFitSize(required_size) && !ExceedsPreferredCount())
+ return true;
+ }
+
+  // Preferred count is only used as a guideline when trimming the cache. Allow
+ // new elements to be added as long as we are below our size limit.
+ return CanFitSize(required_size);
+}
+
+bool GpuImageDecodeController::CanFitSize(size_t size) const {
+ lock_.AssertAcquired();
+
+ base::CheckedNumeric<uint32_t> new_size(bytes_used_);
+ new_size += size;
+ return new_size.IsValid() && new_size.ValueOrDie() <= cached_bytes_limit_;
+}
+
+bool GpuImageDecodeController::ExceedsPreferredCount() const {
+ lock_.AssertAcquired();
+
+ return image_data_.size() > cached_items_limit_;
+}
+
+void GpuImageDecodeController::DecodeImageIfNecessary(
+ const DrawImage& draw_image,
+ ImageData* image_data) {
+ lock_.AssertAcquired();
+
+ DCHECK_GT(image_data->decode.ref_count, 0u);
+
+ if (image_data->decode.decode_failure) {
+ // We have already tried and failed to decode this image. Don't try again.
+ return;
+ }
+
+ if (image_data->upload.image) {
+ // We already have an uploaded image, no reason to decode.
+ return;
+ }
+
+ if (image_data->decode.data &&
+ (image_data->decode.is_locked || image_data->decode.data->Lock())) {
+ // We already decoded this, or we just needed to lock, early out.
+ image_data->decode.is_locked = true;
+ return;
+ }
+
+ TRACE_EVENT0("cc", "GpuImageDecodeController::DecodeImage");
+
+ image_data->decode.data = nullptr;
+ std::unique_ptr<base::DiscardableMemory> backing_memory;
+ {
+ base::AutoUnlock unlock(lock_);
+ switch (image_data->mode) {
+ case DecodedDataMode::CPU: {
+ backing_memory =
+ base::DiscardableMemoryAllocator::GetInstance()
+ ->AllocateLockedDiscardableMemory(image_data->size);
+ SkImageInfo image_info = CreateImageInfoForDrawImage(draw_image);
+ if (!draw_image.image()->readPixels(image_info, backing_memory->data(),
+ image_info.minRowBytes(), 0, 0,
+ SkImage::kDisallow_CachingHint)) {
+ backing_memory.reset();
+ }
+ break;
+ }
+ case DecodedDataMode::GPU: {
+ backing_memory =
+ base::DiscardableMemoryAllocator::GetInstance()
+ ->AllocateLockedDiscardableMemory(image_data->size);
+ auto params = ParamsFromDrawImage(draw_image);
+ if (!draw_image.image()->getDeferredTextureImageData(
+ *context_threadsafe_proxy_.get(), ¶ms, 1,
+ backing_memory->data())) {
+ backing_memory.reset();
+ }
+ break;
+ }
+ }
+ }
+
+ if (image_data->decode.data) {
+    // An at-raster task decoded this before us. Ignore our decode.
+ return;
+ }
+
+ if (!backing_memory) {
+ // If |backing_memory| was not populated, we had a non-decodable image.
+ image_data->decode.decode_failure = true;
+ return;
+ }
+
+ image_data->decode.data = std::move(backing_memory);
+ DCHECK(!image_data->decode.is_locked);
+ image_data->decode.is_locked = true;
+}
+
+void GpuImageDecodeController::UploadImageIfNecessary(
+ const DrawImage& draw_image,
+ ImageData* image_data) {
+ context_->GetLock()->AssertAcquired();
+ lock_.AssertAcquired();
+
+ if (image_data->decode.decode_failure) {
+    // We were unable to decode this image. Don't try to upload.
+ return;
+ }
+
+ if (image_data->upload.image) {
+ // Someone has uploaded this image before us (at raster).
+ return;
+ }
+
+ TRACE_EVENT0("cc", "GpuImageDecodeController::UploadImage");
+ DCHECK(image_data->decode.is_locked);
+ DCHECK_GT(image_data->decode.ref_count, 0u);
+ DCHECK_GT(image_data->upload.ref_count, 0u);
+
+ // We are about to upload a new image and are holding the context lock.
+ // Ensure that any images which have been marked for deletion are actually
+ // cleaned up so we don't exceed our memory limit during this upload.
+ DeletePendingImages();
+
+ skia::RefPtr<SkImage> uploaded_image;
+ {
+ base::AutoUnlock unlock(lock_);
+ switch (image_data->mode) {
+ case DecodedDataMode::CPU: {
+ SkImageInfo image_info = CreateImageInfoForDrawImage(draw_image);
+ uploaded_image = skia::AdoptRef(SkImage::NewFromRaster(
+ image_info, image_data->decode.data->data(),
+ image_info.minRowBytes(), [](const void*, void*) {}, nullptr));
+ break;
+ }
+ case DecodedDataMode::GPU: {
+ uploaded_image =
+ skia::AdoptRef(SkImage::NewFromDeferredTextureImageData(
+ context_->GrContext(), image_data->decode.data->data(),
+ SkBudgeted::kNo));
+ break;
+ }
+ }
+ }
+ DCHECK(uploaded_image);
+
+ // At-raster may have decoded this while we were unlocked. If so, ignore our
+ // result.
+ if (!image_data->upload.image) {
+ image_data->upload.image = std::move(uploaded_image);
+ }
+}
+
+std::unique_ptr<GpuImageDecodeController::ImageData>
+GpuImageDecodeController::CreateImageData(const DrawImage& draw_image) {
+ lock_.AssertAcquired();
+
+ DecodedDataMode mode;
+ SkImageInfo info = CreateImageInfoForDrawImage(draw_image);
+ SkImage::DeferredTextureImageUsageParams params =
+ ParamsFromDrawImage(draw_image);
+ size_t data_size = draw_image.image()->getDeferredTextureImageData(
+ *context_threadsafe_proxy_.get(), ¶ms, 1, nullptr);
+
+ if (data_size == 0) {
+ // Can't upload image, too large or other failure. Try to use SW fallback.
+ data_size = info.getSafeSize(info.minRowBytes());
+ mode = DecodedDataMode::CPU;
+ } else {
+ mode = DecodedDataMode::GPU;
+ }
+
+ return base::WrapUnique(new ImageData(mode, data_size));
+}
+
+void GpuImageDecodeController::DeletePendingImages() {
+ context_->GetLock()->AssertAcquired();
+ lock_.AssertAcquired();
+ images_pending_deletion_.clear();
+}
+
+SkImageInfo GpuImageDecodeController::CreateImageInfoForDrawImage(
+ const DrawImage& draw_image) const {
+ return SkImageInfo::Make(
+ draw_image.image()->width(), draw_image.image()->height(),
+ ResourceFormatToClosestSkColorType(format_), kPremul_SkAlphaType);
}
} // namespace cc
diff --git a/cc/tiles/gpu_image_decode_controller.h b/cc/tiles/gpu_image_decode_controller.h
index 390ef8ed..10b03e13 100644
--- a/cc/tiles/gpu_image_decode_controller.h
+++ b/cc/tiles/gpu_image_decode_controller.h
@@ -5,21 +5,60 @@
#ifndef CC_TILES_GPU_IMAGE_DECODE_CONTROLLER_H_
#define CC_TILES_GPU_IMAGE_DECODE_CONTROLLER_H_
+#include <memory>
#include <unordered_map>
-#include <unordered_set>
+#include <vector>
+#include "base/containers/mru_cache.h"
+#include "base/memory/discardable_memory.h"
#include "base/synchronization/lock.h"
+#include "base/trace_event/memory_dump_provider.h"
#include "cc/base/cc_export.h"
+#include "cc/resources/resource_format.h"
#include "cc/tiles/image_decode_controller.h"
+#include "skia/ext/refptr.h"
+
+class SkImageTextureData;
namespace cc {
+class ContextProvider;
+
+// GpuImageDecodeController handles the decode and upload of images that will
+// be used by Skia's GPU raster path. It also maintains a cache of these
+// decoded/uploaded images for later re-use.
+//
+// Generally, when an image is required for raster, GpuImageDecodeController
+// creates two tasks, one to decode the image, and one to upload the image to
+// the GPU. These tasks are completed before the raster task which depends on
+// the image. We need to separate decode and upload tasks, as decode can occur
+// simultaneously on multiple threads, while upload requires the GL context
+// lock and so must happen on our non-concurrent raster thread.
+//
+// Decoded and Uploaded image data share a single cache entry. Depending on how
+// far we've progressed, this cache entry may contain CPU-side decoded data,
+// GPU-side uploaded data, or both. Because CPU-side decoded data is stored in
+// discardable memory, and is only locked for short periods of time (until the
+// upload completes), this memory is not counted against our sized cache
+// limits. Uploaded GPU memory, being non-discardable, always counts against
+// our limits.
+//
+// In cases where the number of images needed exceeds our cache limits, we
+// operate in an "at-raster" mode. In this mode, there are no decode/upload
+// tasks, and images are decoded/uploaded as needed, immediately before being
+// used in raster. Cache entries for at-raster tasks are marked as such, which
+// prevents future tasks from taking a dependency on them and extending their
+// lifetime longer than is necessary.
class CC_EXPORT GpuImageDecodeController : public ImageDecodeController {
public:
- GpuImageDecodeController();
+ explicit GpuImageDecodeController(ContextProvider* context,
+ ResourceFormat decode_format);
~GpuImageDecodeController() override;
// ImageDecodeController overrides.
+
+ // Finds the existing uploaded image for the provided DrawImage. Creates an
+  // upload task to upload the image if it has not already been uploaded.
bool GetTaskForImageAndRef(const DrawImage& image,
uint64_t prepare_tiles_id,
scoped_refptr<ImageDecodeTask>* task) override;
@@ -28,17 +67,124 @@
void DrawWithImageFinished(const DrawImage& image,
const DecodedDrawImage& decoded_image) override;
void ReduceCacheUsage() override;
+ void SetShouldAggressivelyFreeResources(
+ bool aggressively_free_resources) override;
+ // Called by Decode / Upload tasks.
void DecodeImage(const DrawImage& image);
+ void UploadImage(const DrawImage& image);
+ void DecodeTaskCompleted(const DrawImage& image);
+ void UploadTaskCompleted(const DrawImage& image);
- void RemovePendingTaskForImage(const DrawImage& image);
+ // For testing only.
+ void SetCachedItemLimitForTesting(size_t limit) {
+ cached_items_limit_ = limit;
+ }
+ void SetCachedBytesLimitForTesting(size_t limit) {
+ cached_bytes_limit_ = limit;
+ }
+ size_t GetBytesUsedForTesting() const { return bytes_used_; }
private:
+ enum class DecodedDataMode { GPU, CPU };
+
+ // Stores the CPU-side decoded bits of an image and supporting fields.
+ struct DecodedImageData {
+ DecodedImageData();
+ ~DecodedImageData();
+
+ // May be null if image not yet decoded.
+ std::unique_ptr<base::DiscardableMemory> data;
+ uint32_t ref_count;
+ bool is_locked;
+
+ // Set to true if the image was corrupt and could not be decoded.
+ bool decode_failure;
+ };
+
+ // Stores the GPU-side image and supporting fields.
+ struct UploadedImageData {
+ UploadedImageData();
+ ~UploadedImageData();
+
+ // May be null if image not yet uploaded / prepared.
+ skia::RefPtr<SkImage> image;
+ // True if the image is counting against our memory limits.
+ bool budgeted;
+ uint32_t ref_count;
+ };
+
+ struct ImageData {
+ ImageData(DecodedDataMode mode, size_t size);
+ ~ImageData();
+
+ const DecodedDataMode mode;
+ const size_t size;
+ bool is_at_raster;
+
+ DecodedImageData decode;
+ UploadedImageData upload;
+ };
+
+ using ImageDataMRUCache =
+ base::MRUCache<uint32_t, std::unique_ptr<ImageData>>;
+
+ // All private functions should only be called while holding |lock_|. Some
+ // functions also require the |context_| lock. These are indicated by
+ // additional comments.
+
+ // Similar to GetTaskForImageAndRef, but gets the dependent decode task
+ // rather than the upload task, if necessary.
+ scoped_refptr<ImageDecodeTask> GetImageDecodeTaskAndRef(
+ const DrawImage& image,
+ uint64_t prepare_tiles_id);
+
+ void RefImageDecode(const DrawImage& draw_image);
+ void UnrefImageDecode(const DrawImage& draw_image);
+ void RefImage(const DrawImage& draw_image);
+ void UnrefImageInternal(const DrawImage& draw_image);
+ void RefCountChanged(ImageData* image_data);
+
+ // Ensures that the cache can hold an element of |required_size|, freeing
+ // unreferenced cache entries if necessary to make room.
+ bool EnsureCapacity(size_t required_size);
+ bool CanFitSize(size_t size) const;
+ bool ExceedsPreferredCount() const;
+
+ void DecodeImageIfNecessary(const DrawImage& draw_image,
+ ImageData* image_data);
+
+ std::unique_ptr<GpuImageDecodeController::ImageData> CreateImageData(
+ const DrawImage& image);
+ SkImageInfo CreateImageInfoForDrawImage(const DrawImage& draw_image) const;
+
+ // The following two functions also require the |context_| lock to be held.
+ void UploadImageIfNecessary(const DrawImage& draw_image,
+ ImageData* image_data);
+ void DeletePendingImages();
+
+ const ResourceFormat format_;
+ ContextProvider* context_;
+ skia::RefPtr<GrContextThreadSafeProxy> context_threadsafe_proxy_;
+
+ // All members below this point must only be accessed while holding |lock_|.
base::Lock lock_;
- std::unordered_set<uint32_t> prerolled_images_;
std::unordered_map<uint32_t, scoped_refptr<ImageDecodeTask>>
- pending_image_tasks_;
+ pending_image_upload_tasks_;
+ std::unordered_map<uint32_t, scoped_refptr<ImageDecodeTask>>
+ pending_image_decode_tasks_;
+
+ ImageDataMRUCache image_data_;
+
+ size_t cached_items_limit_;
+ size_t cached_bytes_limit_;
+ size_t bytes_used_;
+
+ // We can't release GPU backed SkImages without holding the context lock,
+ // so we add them to this list and defer deletion until the next time the lock
+ // is held.
+ std::vector<skia::RefPtr<SkImage>> images_pending_deletion_;
};
} // namespace cc
diff --git a/cc/tiles/gpu_image_decode_controller_unittest.cc b/cc/tiles/gpu_image_decode_controller_unittest.cc
new file mode 100644
index 0000000..fef46e9
--- /dev/null
+++ b/cc/tiles/gpu_image_decode_controller_unittest.cc
@@ -0,0 +1,606 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "cc/tiles/gpu_image_decode_controller.h"
+
+#include "cc/playback/draw_image.h"
+#include "cc/raster/tile_task_runner.h"
+#include "cc/test/test_context_provider.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace cc {
+namespace {
+
+// Creates a |width| x |height| raster-backed (non-texture) SkImage for tests.
+skia::RefPtr<SkImage> CreateImage(int width, int height) {
+  SkImageInfo info = SkImageInfo::MakeN32Premul(width, height);
+  SkBitmap bitmap;
+  bitmap.allocPixels(info);
+  return skia::AdoptRef(SkImage::NewFromBitmap(bitmap));
+}
+
+// Builds a scale matrix for tests. When |is_decomposable| is false, a
+// perspective component is added, since perspective cannot be decomposed
+// into a scale.
+SkMatrix CreateMatrix(const SkSize& scale, bool is_decomposable) {
+  SkMatrix matrix;
+  matrix.setScale(scale.width(), scale.height());
+
+  if (!is_decomposable) {
+    // Perspective is not decomposable, add it.
+    matrix[SkMatrix::kMPersp0] = 0.1f;
+    // Writing through the writable operator[] does not update SkMatrix's
+    // cached type mask; mark it dirty so hasPerspective() and friends see
+    // the perspective component.
+    matrix.dirtyMatrixTypeCache();
+  }
+
+  return matrix;
+}
+
+// Drives |task| through the schedule phase of its lifecycle.
+void ScheduleTask(ImageDecodeTask* task) {
+  task->WillSchedule();
+  task->ScheduleOnOriginThread(nullptr);
+  task->DidSchedule();
+}
+
+// Drives |task| through the run phase of its lifecycle (worker thread work).
+void RunTask(ImageDecodeTask* task) {
+  task->WillRun();
+  task->RunOnWorkerThread();
+  task->DidRun();
+}
+
+// Drives |task| through the completion phase of its lifecycle.
+void CompleteTask(ImageDecodeTask* task) {
+  task->WillComplete();
+  task->CompleteOnOriginThread(nullptr);
+  task->DidComplete();
+}
+
+// Runs |task| through its full lifecycle: schedule, run, then complete.
+void ProcessTask(ImageDecodeTask* task) {
+  ScheduleTask(task);
+  RunTask(task);
+  CompleteTask(task);
+}
+
+// Requesting the same image twice (even at different scales) must return the
+// same upload task and take an additional ref each time.
+TEST(GpuImageDecodeControllerTest, GetTaskForImageSameImage) {
+  auto context_provider = TestContextProvider::Create();
+  context_provider->BindToCurrentThread();
+  GpuImageDecodeController controller(context_provider.get(),
+                                      ResourceFormat::RGBA_8888);
+  skia::RefPtr<SkImage> image = CreateImage(100, 100);
+  bool is_decomposable = true;
+  SkFilterQuality quality = kHigh_SkFilterQuality;
+  uint64_t prepare_tiles_id = 1;
+
+  DrawImage draw_image(
+      image.get(), SkIRect::MakeWH(image->width(), image->height()), quality,
+      CreateMatrix(SkSize::Make(0.5f, 0.5f), is_decomposable));
+  scoped_refptr<ImageDecodeTask> task;
+  bool need_unref =
+      controller.GetTaskForImageAndRef(draw_image, prepare_tiles_id, &task);
+  EXPECT_TRUE(need_unref);
+  EXPECT_TRUE(task);
+
+  // Same source image at a different scale: must share the upload task.
+  DrawImage another_draw_image(
+      image.get(), SkIRect::MakeWH(image->width(), image->height()), quality,
+      CreateMatrix(SkSize::Make(1.5f, 1.5f), is_decomposable));
+  scoped_refptr<ImageDecodeTask> another_task;
+  need_unref = controller.GetTaskForImageAndRef(
+      another_draw_image, prepare_tiles_id, &another_task);
+  EXPECT_TRUE(need_unref);
+  EXPECT_TRUE(task.get() == another_task.get());
+
+  ProcessTask(task->dependency().get());
+  ProcessTask(task.get());
+
+  // One unref for each successful GetTaskForImageAndRef call.
+  controller.UnrefImage(draw_image);
+  controller.UnrefImage(draw_image);
+}
+
+// Distinct source images must produce independent upload tasks.
+TEST(GpuImageDecodeControllerTest, GetTaskForImageDifferentImage) {
+  auto context_provider = TestContextProvider::Create();
+  context_provider->BindToCurrentThread();
+  GpuImageDecodeController controller(context_provider.get(),
+                                      ResourceFormat::RGBA_8888);
+  bool is_decomposable = true;
+  uint64_t prepare_tiles_id = 1;
+  SkFilterQuality quality = kHigh_SkFilterQuality;
+
+  skia::RefPtr<SkImage> first_image = CreateImage(100, 100);
+  DrawImage first_draw_image(
+      first_image.get(),
+      SkIRect::MakeWH(first_image->width(), first_image->height()), quality,
+      CreateMatrix(SkSize::Make(0.5f, 0.5f), is_decomposable));
+  scoped_refptr<ImageDecodeTask> first_task;
+  bool need_unref = controller.GetTaskForImageAndRef(
+      first_draw_image, prepare_tiles_id, &first_task);
+  EXPECT_TRUE(need_unref);
+  EXPECT_TRUE(first_task);
+
+  skia::RefPtr<SkImage> second_image = CreateImage(100, 100);
+  DrawImage second_draw_image(
+      second_image.get(),
+      SkIRect::MakeWH(second_image->width(), second_image->height()), quality,
+      CreateMatrix(SkSize::Make(0.25f, 0.25f), is_decomposable));
+  scoped_refptr<ImageDecodeTask> second_task;
+  need_unref = controller.GetTaskForImageAndRef(second_draw_image,
+                                                prepare_tiles_id, &second_task);
+  EXPECT_TRUE(need_unref);
+  EXPECT_TRUE(second_task);
+  EXPECT_TRUE(first_task.get() != second_task.get());
+
+  ProcessTask(first_task->dependency().get());
+  ProcessTask(first_task.get());
+  ProcessTask(second_task->dependency().get());
+  ProcessTask(second_task.get());
+
+  controller.UnrefImage(first_draw_image);
+  controller.UnrefImage(second_draw_image);
+}
+
+// Once the decode dependency and the upload have run, re-requesting the same
+// image should take a ref without creating a new task.
+TEST(GpuImageDecodeControllerTest, GetTaskForImageAlreadyDecoded) {
+  auto context_provider = TestContextProvider::Create();
+  context_provider->BindToCurrentThread();
+  GpuImageDecodeController controller(context_provider.get(),
+                                      ResourceFormat::RGBA_8888);
+  bool is_decomposable = true;
+  uint64_t prepare_tiles_id = 1;
+  SkFilterQuality quality = kHigh_SkFilterQuality;
+
+  skia::RefPtr<SkImage> image = CreateImage(100, 100);
+  DrawImage draw_image(
+      image.get(), SkIRect::MakeWH(image->width(), image->height()), quality,
+      CreateMatrix(SkSize::Make(0.5f, 0.5f), is_decomposable));
+  scoped_refptr<ImageDecodeTask> task;
+  bool need_unref =
+      controller.GetTaskForImageAndRef(draw_image, prepare_tiles_id, &task);
+  EXPECT_TRUE(need_unref);
+  EXPECT_TRUE(task);
+  EXPECT_TRUE(task->dependency());
+
+  // Run the upload but don't complete it yet.
+  ProcessTask(task->dependency().get());
+  ScheduleTask(task.get());
+  RunTask(task.get());
+
+  scoped_refptr<ImageDecodeTask> another_task;
+  need_unref = controller.GetTaskForImageAndRef(draw_image, prepare_tiles_id,
+                                                &another_task);
+  EXPECT_TRUE(need_unref);
+  EXPECT_FALSE(another_task);
+
+  CompleteTask(task.get());
+
+  controller.UnrefImage(draw_image);
+  controller.UnrefImage(draw_image);
+}
+
+// A canceled (scheduled-but-never-run) upload task must not be reused once
+// all refs are dropped; a later request gets a brand-new task.
+TEST(GpuImageDecodeControllerTest, GetTaskForImageCanceledGetsNewTask) {
+  auto context_provider = TestContextProvider::Create();
+  context_provider->BindToCurrentThread();
+  GpuImageDecodeController controller(context_provider.get(),
+                                      ResourceFormat::RGBA_8888);
+  bool is_decomposable = true;
+  uint64_t prepare_tiles_id = 1;
+  SkFilterQuality quality = kHigh_SkFilterQuality;
+
+  skia::RefPtr<SkImage> image = CreateImage(100, 100);
+  DrawImage draw_image(
+      image.get(), SkIRect::MakeWH(image->width(), image->height()), quality,
+      CreateMatrix(SkSize::Make(0.5f, 0.5f), is_decomposable));
+  scoped_refptr<ImageDecodeTask> task;
+  bool need_unref =
+      controller.GetTaskForImageAndRef(draw_image, prepare_tiles_id, &task);
+  EXPECT_TRUE(need_unref);
+  EXPECT_TRUE(task);
+
+  ProcessTask(task->dependency().get());
+  ScheduleTask(task.get());
+
+  // While the task is still scheduled, it is returned for new requests.
+  scoped_refptr<ImageDecodeTask> another_task;
+  need_unref = controller.GetTaskForImageAndRef(draw_image, prepare_tiles_id,
+                                                &another_task);
+  EXPECT_TRUE(need_unref);
+  EXPECT_TRUE(another_task.get() == task.get());
+
+  // Didn't run the task, complete it (it was canceled).
+  CompleteTask(task.get());
+
+  // Fully cancel everything (so the raster would unref things).
+  controller.UnrefImage(draw_image);
+  controller.UnrefImage(draw_image);
+
+  // Here a new task is created.
+  scoped_refptr<ImageDecodeTask> third_task;
+  need_unref = controller.GetTaskForImageAndRef(draw_image, prepare_tiles_id,
+                                                &third_task);
+  EXPECT_TRUE(need_unref);
+  EXPECT_TRUE(third_task);
+  EXPECT_FALSE(third_task.get() == task.get());
+
+  ProcessTask(third_task->dependency().get());
+  ProcessTask(third_task.get());
+
+  controller.UnrefImage(draw_image);
+}
+
+// Even while refs are outstanding, a canceled upload task is replaced by a
+// fresh task on the next request.
+TEST(GpuImageDecodeControllerTest,
+     GetTaskForImageCanceledWhileReffedGetsNewTask) {
+  auto context_provider = TestContextProvider::Create();
+  context_provider->BindToCurrentThread();
+  GpuImageDecodeController controller(context_provider.get(),
+                                      ResourceFormat::RGBA_8888);
+  bool is_decomposable = true;
+  uint64_t prepare_tiles_id = 1;
+  SkFilterQuality quality = kHigh_SkFilterQuality;
+
+  skia::RefPtr<SkImage> image = CreateImage(100, 100);
+  DrawImage draw_image(
+      image.get(), SkIRect::MakeWH(image->width(), image->height()), quality,
+      CreateMatrix(SkSize::Make(0.5f, 0.5f), is_decomposable));
+  scoped_refptr<ImageDecodeTask> task;
+  bool need_unref =
+      controller.GetTaskForImageAndRef(draw_image, prepare_tiles_id, &task);
+  EXPECT_TRUE(need_unref);
+  EXPECT_TRUE(task);
+
+  ProcessTask(task->dependency().get());
+  ScheduleTask(task.get());
+
+  scoped_refptr<ImageDecodeTask> another_task;
+  need_unref = controller.GetTaskForImageAndRef(draw_image, prepare_tiles_id,
+                                                &another_task);
+  EXPECT_TRUE(need_unref);
+  EXPECT_TRUE(another_task.get() == task.get());
+
+  // Didn't run the task, complete it (it was canceled).
+  CompleteTask(task.get());
+
+  // Note that here, everything is reffed, but a new task is created. This is
+  // possible with repeated schedule/cancel operations.
+  scoped_refptr<ImageDecodeTask> third_task;
+  need_unref = controller.GetTaskForImageAndRef(draw_image, prepare_tiles_id,
+                                                &third_task);
+  EXPECT_TRUE(need_unref);
+  EXPECT_TRUE(third_task);
+  EXPECT_FALSE(third_task.get() == task.get());
+
+  ProcessTask(third_task->dependency().get());
+  ProcessTask(third_task.get());
+
+  // 3 Unrefs!
+  controller.UnrefImage(draw_image);
+  controller.UnrefImage(draw_image);
+  controller.UnrefImage(draw_image);
+}
+
+// After decode + upload tasks run, GetDecodedImageForDraw must return a
+// texture-backed image that was not produced at-raster.
+TEST(GpuImageDecodeControllerTest, GetDecodedImageForDraw) {
+  auto context_provider = TestContextProvider::Create();
+  context_provider->BindToCurrentThread();
+  GpuImageDecodeController controller(context_provider.get(),
+                                      ResourceFormat::RGBA_8888);
+  bool is_decomposable = true;
+  uint64_t prepare_tiles_id = 1;
+  SkFilterQuality quality = kHigh_SkFilterQuality;
+
+  skia::RefPtr<SkImage> image = CreateImage(100, 100);
+  DrawImage draw_image(
+      image.get(), SkIRect::MakeWH(image->width(), image->height()), quality,
+      CreateMatrix(SkSize::Make(0.5f, 0.5f), is_decomposable));
+  scoped_refptr<ImageDecodeTask> task;
+  bool need_unref =
+      controller.GetTaskForImageAndRef(draw_image, prepare_tiles_id, &task);
+  EXPECT_TRUE(need_unref);
+  EXPECT_TRUE(task);
+
+  ProcessTask(task->dependency().get());
+  ProcessTask(task.get());
+
+  // Must hold context lock before calling GetDecodedImageForDraw /
+  // DrawWithImageFinished.
+  ContextProvider::ScopedContextLock context_lock(context_provider.get());
+  DecodedDrawImage decoded_draw_image =
+      controller.GetDecodedImageForDraw(draw_image);
+  EXPECT_TRUE(decoded_draw_image.image());
+  EXPECT_TRUE(decoded_draw_image.image()->isTextureBacked());
+  EXPECT_FALSE(decoded_draw_image.is_at_raster_decode());
+
+  controller.DrawWithImageFinished(draw_image, decoded_draw_image);
+  controller.UnrefImage(draw_image);
+}
+
+// Images too large for GPU upload fall back to the CPU path; the draw image
+// must then be non-texture-backed.
+TEST(GpuImageDecodeControllerTest, GetLargeDecodedImageForDraw) {
+  auto context_provider = TestContextProvider::Create();
+  context_provider->BindToCurrentThread();
+  GpuImageDecodeController controller(context_provider.get(),
+                                      ResourceFormat::RGBA_8888);
+  bool is_decomposable = true;
+  uint64_t prepare_tiles_id = 1;
+  SkFilterQuality quality = kHigh_SkFilterQuality;
+
+  // 1x24000 exceeds typical max texture dimensions, forcing the SW fallback.
+  skia::RefPtr<SkImage> image = CreateImage(1, 24000);
+  DrawImage draw_image(
+      image.get(), SkIRect::MakeWH(image->width(), image->height()), quality,
+      CreateMatrix(SkSize::Make(0.5f, 0.5f), is_decomposable));
+  scoped_refptr<ImageDecodeTask> task;
+  bool need_unref =
+      controller.GetTaskForImageAndRef(draw_image, prepare_tiles_id, &task);
+  EXPECT_TRUE(need_unref);
+  EXPECT_TRUE(task);
+
+  ProcessTask(task->dependency().get());
+  ProcessTask(task.get());
+
+  // Must hold context lock before calling GetDecodedImageForDraw /
+  // DrawWithImageFinished.
+  ContextProvider::ScopedContextLock context_lock(context_provider.get());
+  DecodedDrawImage decoded_draw_image =
+      controller.GetDecodedImageForDraw(draw_image);
+  EXPECT_TRUE(decoded_draw_image.image());
+  EXPECT_FALSE(decoded_draw_image.image()->isTextureBacked());
+  EXPECT_FALSE(decoded_draw_image.is_at_raster_decode());
+
+  controller.DrawWithImageFinished(draw_image, decoded_draw_image);
+  controller.UnrefImage(draw_image);
+}
+
+// With zero cache budget, no tasks can be created and the image must be
+// decoded/uploaded at-raster.
+TEST(GpuImageDecodeControllerTest, GetDecodedImageForDrawAtRasterDecode) {
+  auto context_provider = TestContextProvider::Create();
+  context_provider->BindToCurrentThread();
+  GpuImageDecodeController controller(context_provider.get(),
+                                      ResourceFormat::RGBA_8888);
+  bool is_decomposable = true;
+  uint64_t prepare_tiles_id = 1;
+  SkFilterQuality quality = kHigh_SkFilterQuality;
+
+  // Force the at-raster path by disallowing any cached entries.
+  controller.SetCachedItemLimitForTesting(0);
+  controller.SetCachedBytesLimitForTesting(0);
+
+  skia::RefPtr<SkImage> image = CreateImage(100, 100);
+  DrawImage draw_image(
+      image.get(), SkIRect::MakeWH(image->width(), image->height()), quality,
+      CreateMatrix(SkSize::Make(0.5f, 0.5f), is_decomposable));
+
+  scoped_refptr<ImageDecodeTask> task;
+  bool need_unref =
+      controller.GetTaskForImageAndRef(draw_image, prepare_tiles_id, &task);
+  EXPECT_FALSE(need_unref);
+  EXPECT_FALSE(task);
+
+  // Must hold context lock before calling GetDecodedImageForDraw /
+  // DrawWithImageFinished.
+  ContextProvider::ScopedContextLock context_lock(context_provider.get());
+  DecodedDrawImage decoded_draw_image =
+      controller.GetDecodedImageForDraw(draw_image);
+  EXPECT_TRUE(decoded_draw_image.image());
+  EXPECT_TRUE(decoded_draw_image.image()->isTextureBacked());
+  EXPECT_TRUE(decoded_draw_image.is_at_raster_decode());
+
+  controller.DrawWithImageFinished(draw_image, decoded_draw_image);
+}
+
+// An at-raster image is promoted into the cache when the budget is raised
+// before the draw finishes; later requests then need no new task.
+TEST(GpuImageDecodeControllerTest, AtRasterUsedDirectlyIfSpaceAllows) {
+  auto context_provider = TestContextProvider::Create();
+  context_provider->BindToCurrentThread();
+  GpuImageDecodeController controller(context_provider.get(),
+                                      ResourceFormat::RGBA_8888);
+  bool is_decomposable = true;
+  uint64_t prepare_tiles_id = 1;
+  SkFilterQuality quality = kHigh_SkFilterQuality;
+
+  // Force the first request down the at-raster path.
+  controller.SetCachedItemLimitForTesting(0);
+  controller.SetCachedBytesLimitForTesting(0);
+
+  skia::RefPtr<SkImage> image = CreateImage(100, 100);
+  DrawImage draw_image(
+      image.get(), SkIRect::MakeWH(image->width(), image->height()), quality,
+      CreateMatrix(SkSize::Make(0.5f, 0.5f), is_decomposable));
+
+  scoped_refptr<ImageDecodeTask> task;
+  bool need_unref =
+      controller.GetTaskForImageAndRef(draw_image, prepare_tiles_id, &task);
+  EXPECT_FALSE(need_unref);
+  EXPECT_FALSE(task);
+
+  // Must hold context lock before calling GetDecodedImageForDraw /
+  // DrawWithImageFinished.
+  ContextProvider::ScopedContextLock context_lock(context_provider.get());
+  DecodedDrawImage decoded_draw_image =
+      controller.GetDecodedImageForDraw(draw_image);
+  EXPECT_TRUE(decoded_draw_image.image());
+  EXPECT_TRUE(decoded_draw_image.image()->isTextureBacked());
+  EXPECT_TRUE(decoded_draw_image.is_at_raster_decode());
+
+  controller.SetCachedItemLimitForTesting(1000);
+  controller.SetCachedBytesLimitForTesting(96 * 1024 * 1024);
+
+  // Finish our draw after increasing the memory limit, image should be added
+  // to cache.
+  controller.DrawWithImageFinished(draw_image, decoded_draw_image);
+
+  // Re-request the image into |another_task| (the original code passed
+  // |&task| here, leaving |another_task| unset and the EXPECT below vacuous).
+  // The image is now cached, so no new task should be needed.
+  scoped_refptr<ImageDecodeTask> another_task;
+  bool another_task_needs_unref = controller.GetTaskForImageAndRef(
+      draw_image, prepare_tiles_id, &another_task);
+  EXPECT_TRUE(another_task_needs_unref);
+  EXPECT_FALSE(another_task);
+  controller.UnrefImage(draw_image);
+}
+
+// Concurrent at-raster requests for the same image must share one uploaded
+// image rather than decoding twice.
+TEST(GpuImageDecodeControllerTest,
+     GetDecodedImageForDrawAtRasterDecodeMultipleTimes) {
+  auto context_provider = TestContextProvider::Create();
+  context_provider->BindToCurrentThread();
+  GpuImageDecodeController controller(context_provider.get(),
+                                      ResourceFormat::RGBA_8888);
+  bool is_decomposable = true;
+  SkFilterQuality quality = kHigh_SkFilterQuality;
+
+  // Force the at-raster path.
+  controller.SetCachedItemLimitForTesting(0);
+  controller.SetCachedBytesLimitForTesting(0);
+
+  skia::RefPtr<SkImage> image = CreateImage(100, 100);
+  DrawImage draw_image(
+      image.get(), SkIRect::MakeWH(image->width(), image->height()), quality,
+      CreateMatrix(SkSize::Make(0.5f, 0.5f), is_decomposable));
+
+  // Must hold context lock before calling GetDecodedImageForDraw /
+  // DrawWithImageFinished.
+  ContextProvider::ScopedContextLock context_lock(context_provider.get());
+  DecodedDrawImage decoded_draw_image =
+      controller.GetDecodedImageForDraw(draw_image);
+  EXPECT_TRUE(decoded_draw_image.image());
+  EXPECT_TRUE(decoded_draw_image.image()->isTextureBacked());
+  EXPECT_TRUE(decoded_draw_image.is_at_raster_decode());
+
+  // Same underlying SkImage (by uniqueID) must be returned the second time.
+  DecodedDrawImage another_decoded_draw_image =
+      controller.GetDecodedImageForDraw(draw_image);
+  EXPECT_EQ(decoded_draw_image.image()->uniqueID(),
+            another_decoded_draw_image.image()->uniqueID());
+
+  controller.DrawWithImageFinished(draw_image, decoded_draw_image);
+  controller.DrawWithImageFinished(draw_image, another_decoded_draw_image);
+}
+
+// A draw scaled to zero produces no task and no decoded image.
+TEST(GpuImageDecodeControllerTest, ZeroSizedImagesAreSkipped) {
+  auto context_provider = TestContextProvider::Create();
+  context_provider->BindToCurrentThread();
+  GpuImageDecodeController controller(context_provider.get(),
+                                      ResourceFormat::RGBA_8888);
+  bool is_decomposable = true;
+  uint64_t prepare_tiles_id = 1;
+  SkFilterQuality quality = kHigh_SkFilterQuality;
+
+  skia::RefPtr<SkImage> image = CreateImage(100, 100);
+  // Zero scale: the image is never visible.
+  DrawImage draw_image(
+      image.get(), SkIRect::MakeWH(image->width(), image->height()), quality,
+      CreateMatrix(SkSize::Make(0.f, 0.f), is_decomposable));
+
+  scoped_refptr<ImageDecodeTask> task;
+  bool need_unref =
+      controller.GetTaskForImageAndRef(draw_image, prepare_tiles_id, &task);
+  EXPECT_FALSE(task);
+  EXPECT_FALSE(need_unref);
+
+  // Must hold context lock before calling GetDecodedImageForDraw /
+  // DrawWithImageFinished.
+  ContextProvider::ScopedContextLock context_lock(context_provider.get());
+  DecodedDrawImage decoded_draw_image =
+      controller.GetDecodedImageForDraw(draw_image);
+  EXPECT_FALSE(decoded_draw_image.image());
+
+  controller.DrawWithImageFinished(draw_image, decoded_draw_image);
+}
+
+// A src rect entirely outside the image bounds produces no task and no
+// decoded image.
+TEST(GpuImageDecodeControllerTest, NonOverlappingSrcRectImagesAreSkipped) {
+  auto context_provider = TestContextProvider::Create();
+  context_provider->BindToCurrentThread();
+  GpuImageDecodeController controller(context_provider.get(),
+                                      ResourceFormat::RGBA_8888);
+  bool is_decomposable = true;
+  uint64_t prepare_tiles_id = 1;
+  SkFilterQuality quality = kHigh_SkFilterQuality;
+
+  skia::RefPtr<SkImage> image = CreateImage(100, 100);
+  // Src rect starts at (150, 150), beyond the 100x100 image.
+  DrawImage draw_image(
+      image.get(), SkIRect::MakeXYWH(150, 150, image->width(), image->height()),
+      quality, CreateMatrix(SkSize::Make(1.f, 1.f), is_decomposable));
+
+  scoped_refptr<ImageDecodeTask> task;
+  bool need_unref =
+      controller.GetTaskForImageAndRef(draw_image, prepare_tiles_id, &task);
+  EXPECT_FALSE(task);
+  EXPECT_FALSE(need_unref);
+
+  // Must hold context lock before calling GetDecodedImageForDraw /
+  // DrawWithImageFinished.
+  ContextProvider::ScopedContextLock context_lock(context_provider.get());
+  DecodedDrawImage decoded_draw_image =
+      controller.GetDecodedImageForDraw(draw_image);
+  EXPECT_FALSE(decoded_draw_image.image());
+
+  controller.DrawWithImageFinished(draw_image, decoded_draw_image);
+}
+
+// Tasks that are scheduled and then canceled (never run) must release their
+// budgeted bytes once the image is unreffed.
+TEST(GpuImageDecodeControllerTest, CanceledTasksDoNotCountAgainstBudget) {
+  auto context_provider = TestContextProvider::Create();
+  context_provider->BindToCurrentThread();
+  GpuImageDecodeController controller(context_provider.get(),
+                                      ResourceFormat::RGBA_8888);
+  bool is_decomposable = true;
+  uint64_t prepare_tiles_id = 1;
+  SkFilterQuality quality = kHigh_SkFilterQuality;
+
+  skia::RefPtr<SkImage> image = CreateImage(100, 100);
+  DrawImage draw_image(
+      image.get(), SkIRect::MakeXYWH(0, 0, image->width(), image->height()),
+      quality, CreateMatrix(SkSize::Make(1.f, 1.f), is_decomposable));
+
+  scoped_refptr<ImageDecodeTask> task;
+  bool need_unref =
+      controller.GetTaskForImageAndRef(draw_image, prepare_tiles_id, &task);
+  // Budget is reserved as soon as the ref is taken.
+  EXPECT_NE(0u, controller.GetBytesUsedForTesting());
+  EXPECT_TRUE(task);
+  EXPECT_TRUE(need_unref);
+
+  // Schedule then complete without running: both tasks are canceled.
+  ScheduleTask(task->dependency().get());
+  CompleteTask(task->dependency().get());
+  ScheduleTask(task.get());
+  CompleteTask(task.get());
+
+  controller.UnrefImage(draw_image);
+  EXPECT_EQ(0u, controller.GetBytesUsedForTesting());
+}
+
+// SetShouldAggressivelyFreeResources(true) must drop all cached bytes and
+// force subsequent requests to at-raster; turning it off restores normal
+// task-based operation.
+TEST(GpuImageDecodeControllerTest, ShouldAggressivelyFreeResources) {
+  auto context_provider = TestContextProvider::Create();
+  context_provider->BindToCurrentThread();
+  GpuImageDecodeController controller(context_provider.get(),
+                                      ResourceFormat::RGBA_8888);
+  bool is_decomposable = true;
+  uint64_t prepare_tiles_id = 1;
+  SkFilterQuality quality = kHigh_SkFilterQuality;
+
+  skia::RefPtr<SkImage> image = CreateImage(100, 100);
+  DrawImage draw_image(
+      image.get(), SkIRect::MakeWH(image->width(), image->height()), quality,
+      CreateMatrix(SkSize::Make(0.5f, 0.5f), is_decomposable));
+  scoped_refptr<ImageDecodeTask> task;
+  {
+    bool need_unref =
+        controller.GetTaskForImageAndRef(draw_image, prepare_tiles_id, &task);
+    EXPECT_TRUE(need_unref);
+    EXPECT_TRUE(task);
+  }
+
+  ProcessTask(task->dependency().get());
+  ProcessTask(task.get());
+
+  controller.UnrefImage(draw_image);
+
+  // We should now have image data in our cache. Use gtest EXPECT macros
+  // rather than DCHECKs: DCHECKs compile out in non-debug builds and do not
+  // report gtest failures.
+  EXPECT_GT(controller.GetBytesUsedForTesting(), 0u);
+
+  // Tell our controller to aggressively free resources.
+  controller.SetShouldAggressivelyFreeResources(true);
+  EXPECT_EQ(0u, controller.GetBytesUsedForTesting());
+
+  // Attempting to upload a new image should result in at-raster decode.
+  {
+    bool need_unref =
+        controller.GetTaskForImageAndRef(draw_image, prepare_tiles_id, &task);
+    EXPECT_FALSE(need_unref);
+    EXPECT_FALSE(task);
+  }
+
+  // We now tell the controller to not aggressively free resources. Uploads
+  // should work again.
+  controller.SetShouldAggressivelyFreeResources(false);
+  {
+    bool need_unref =
+        controller.GetTaskForImageAndRef(draw_image, prepare_tiles_id, &task);
+    EXPECT_TRUE(need_unref);
+    EXPECT_TRUE(task);
+  }
+
+  ProcessTask(task->dependency().get());
+  ProcessTask(task.get());
+
+  // The image should be in our cache after un-ref.
+  controller.UnrefImage(draw_image);
+  EXPECT_GT(controller.GetBytesUsedForTesting(), 0u);
+}
+
+} // namespace
+} // namespace cc
diff --git a/cc/tiles/image_decode_controller.h b/cc/tiles/image_decode_controller.h
index 4bf03d8..6d3cd29a 100644
--- a/cc/tiles/image_decode_controller.h
+++ b/cc/tiles/image_decode_controller.h
@@ -62,6 +62,11 @@
// This function informs the controller that now is a good time to clean up
// memory. This is called periodically from the compositor thread.
virtual void ReduceCacheUsage() = 0;
+
+ // This function informs the controller that we are hidden and should not be
+ // retaining cached resources longer than needed.
+ virtual void SetShouldAggressivelyFreeResources(
+ bool aggressively_free_resources) = 0;
};
} // namespace cc
diff --git a/cc/tiles/software_image_decode_controller.cc b/cc/tiles/software_image_decode_controller.cc
index 3f52af5..8ed5a0eb 100644
--- a/cc/tiles/software_image_decode_controller.cc
+++ b/cc/tiles/software_image_decode_controller.cc
@@ -112,33 +112,11 @@
return std::min(key.filter_quality(), kLow_SkFilterQuality);
}
-SkColorType SkColorTypeForDecoding(ResourceFormat format) {
- // Use kN32_SkColorType if there is no corresponding SkColorType.
- switch (format) {
- case RGBA_4444:
- return kARGB_4444_SkColorType;
- case RGBA_8888:
- case BGRA_8888:
- return kN32_SkColorType;
- case ALPHA_8:
- return kAlpha_8_SkColorType;
- case RGB_565:
- return kRGB_565_SkColorType;
- case LUMINANCE_8:
- return kGray_8_SkColorType;
- case ETC1:
- case RED_8:
- case LUMINANCE_F16:
- return kN32_SkColorType;
- }
- NOTREACHED();
- return kN32_SkColorType;
-}
-
SkImageInfo CreateImageInfo(size_t width,
size_t height,
ResourceFormat format) {
- return SkImageInfo::Make(width, height, SkColorTypeForDecoding(format),
+ return SkImageInfo::Make(width, height,
+ ResourceFormatToClosestSkColorType(format),
kPremul_SkAlphaType);
}
diff --git a/cc/tiles/software_image_decode_controller.h b/cc/tiles/software_image_decode_controller.h
index a6262e9d..a94c45f7 100644
--- a/cc/tiles/software_image_decode_controller.h
+++ b/cc/tiles/software_image_decode_controller.h
@@ -116,6 +116,9 @@
void DrawWithImageFinished(const DrawImage& image,
const DecodedDrawImage& decoded_image) override;
void ReduceCacheUsage() override;
+ // Software doesn't keep outstanding images pinned, so this is a no-op.
+ void SetShouldAggressivelyFreeResources(
+ bool aggressively_free_resources) override {}
// Decode the given image and store it in the cache. This is only called by an
// image decode task from a worker thread.
diff --git a/cc/tiles/tile_manager.cc b/cc/tiles/tile_manager.cc
index 2af48b5..e2f362d 100644
--- a/cc/tiles/tile_manager.cc
+++ b/cc/tiles/tile_manager.cc
@@ -155,6 +155,23 @@
TaskGraph::Node(task, category, priority, dependencies));
}
+// Inserts a graph node for |task|, first recursively inserting its (upload
+// tasks have a decode dependency) uncompleted dependency and the edge
+// connecting them. Tasks that cannot run concurrently (e.g. GPU uploads,
+// which need the context lock) are forced into the non-concurrent
+// foreground category regardless of |category|.
+void InsertNodeForDecodeTask(TaskGraph* graph,
+                             ImageDecodeTask* task,
+                             uint16_t category,
+                             uint16_t priority) {
+  uint32_t dependency_count = 0u;
+  auto* dependency = task->dependency().get();
+  if (dependency && !dependency->HasCompleted()) {
+    InsertNodeForDecodeTask(graph, dependency, category, priority);
+    graph->edges.push_back(TaskGraph::Edge(dependency, task));
+    dependency_count = 1u;
+  }
+  InsertNodeForTask(graph, task, task->SupportsConcurrentExecution()
+                                     ? category
+                                     : TASK_CATEGORY_NONCONCURRENT_FOREGROUND,
+                    priority, dependency_count);
+}
+
void InsertNodesForRasterTask(TaskGraph* graph,
RasterTask* raster_task,
const ImageDecodeTask::Vector& decode_tasks,
@@ -209,7 +226,8 @@
}
if (decode_it == graph->nodes.end()) {
- InsertNodeForTask(graph, decode_task, decode_task_category, priority, 0u);
+ InsertNodeForDecodeTask(graph, decode_task, decode_task_category,
+ priority);
}
graph->edges.push_back(TaskGraph::Edge(decode_task, raster_task));
diff --git a/cc/trees/layer_tree_host_impl.cc b/cc/trees/layer_tree_host_impl.cc
index c3b8bf71..76a1506 100644
--- a/cc/trees/layer_tree_host_impl.cc
+++ b/cc/trees/layer_tree_host_impl.cc
@@ -1241,14 +1241,21 @@
gpu::MemoryAllocation::CUTOFF_ALLOW_NOTHING);
global_tile_state_.num_resources_limit = policy.num_resources_limit;
- if (output_surface_ && global_tile_state_.hard_memory_limit_in_bytes > 0) {
+ if (global_tile_state_.hard_memory_limit_in_bytes > 0) {
// If |global_tile_state_.hard_memory_limit_in_bytes| is greater than 0, we
- // allow the worker context to retain allocated resources. Notify the worker
- // context. If the memory policy has become zero, we'll handle the
- // notification in NotifyAllTileTasksCompleted, after in-progress work
- // finishes.
- output_surface_->SetWorkerContextShouldAggressivelyFreeResources(
- false /* aggressively_free_resources */);
+ // allow the worker context and image decode controller to retain allocated
+ // resources. Notify them here. If the memory policy has become zero, we'll
+ // handle the notification in NotifyAllTileTasksCompleted, after
+ // in-progress work finishes.
+ if (output_surface_) {
+ output_surface_->SetWorkerContextShouldAggressivelyFreeResources(
+ false /* aggressively_free_resources */);
+ }
+
+ if (image_decode_controller_) {
+ image_decode_controller_->SetShouldAggressivelyFreeResources(
+ false /* aggressively_free_resources */);
+ }
}
DCHECK(resource_pool_);
@@ -1320,9 +1327,15 @@
void LayerTreeHostImpl::NotifyAllTileTasksCompleted() {
// The tile tasks started by the most recent call to PrepareTiles have
// completed. Now is a good time to free resources if necessary.
- if (output_surface_ && global_tile_state_.hard_memory_limit_in_bytes == 0) {
- output_surface_->SetWorkerContextShouldAggressivelyFreeResources(
- true /* aggressively_free_resources */);
+ if (global_tile_state_.hard_memory_limit_in_bytes == 0) {
+ if (output_surface_) {
+ output_surface_->SetWorkerContextShouldAggressivelyFreeResources(
+ true /* aggressively_free_resources */);
+ }
+ if (image_decode_controller_) {
+ image_decode_controller_->SetShouldAggressivelyFreeResources(
+ true /* aggressively_free_resources */);
+ }
}
}
@@ -2141,7 +2154,9 @@
CreateResourceAndTileTaskWorkerPool(&tile_task_worker_pool_, &resource_pool_);
if (use_gpu_rasterization_) {
- image_decode_controller_ = base::WrapUnique(new GpuImageDecodeController);
+ image_decode_controller_ = base::WrapUnique(new GpuImageDecodeController(
+ output_surface_->worker_context_provider(),
+ settings_.renderer_settings.preferred_tile_format));
} else {
image_decode_controller_ =
base::WrapUnique(new SoftwareImageDecodeController(