// Copyright 2016 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "cc/tiles/gpu_image_decode_controller.h"

#include "base/memory/discardable_memory_allocator.h"
#include "base/memory/ptr_util.h"
#include "base/numerics/safe_math.h"
#include "base/strings/stringprintf.h"
#include "base/thread_task_runner_handle.h"
#include "cc/debug/devtools_instrumentation.h"
#include "cc/output/context_provider.h"
#include "cc/raster/tile_task_runner.h"
#include "gpu/command_buffer/client/context_support.h"
#include "gpu/command_buffer/client/gles2_interface.h"
#include "gpu_image_decode_controller.h"
#include "skia/ext/refptr.h"
#include "skia/ext/texture_handle.h"
#include "third_party/skia/include/core/SkCanvas.h"
#include "third_party/skia/include/core/SkSurface.h"
#include "third_party/skia/include/gpu/GrContext.h"
#include "third_party/skia/include/gpu/GrTexture.h"
#include "ui/gfx/skia_util.h"
#include "ui/gl/trace_util.h"

namespace cc {
namespace {

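// Limits for this cache: the maximum number of image bytes to keep resident
// and the maximum number of cached entries. Both are enforced by
// EnsureCapacity().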
static const int kMaxGpuImageBytes = 1024 * 1024 * 96;
static const int kMaxDiscardableItems = 2000;

// Returns true if an image would not be drawn and should therefore be
// skipped rather than decoded.
bool SkipImage(const DrawImage& draw_image) {
  if (!SkIRect::Intersects(draw_image.src_rect(), draw_image.image()->bounds()))
    return true;
  if (std::abs(draw_image.scale().width()) <
          std::numeric_limits<float>::epsilon() ||
      std::abs(draw_image.scale().height()) <
          std::numeric_limits<float>::epsilon()) {
    return true;
  }
  return false;
}

SkImage::DeferredTextureImageUsageParams ParamsFromDrawImage(
    const DrawImage& draw_image) {
  SkImage::DeferredTextureImageUsageParams params;
  params.fMatrix = draw_image.matrix();
  params.fQuality = draw_image.filter_quality();

  return params;
}

}  // namespace

// Task which decodes an image and stores the result in discardable memory.
// This task does not use GPU resources and can be run on any thread.
class ImageDecodeTaskImpl : public ImageDecodeTask {
 public:
  ImageDecodeTaskImpl(GpuImageDecodeController* controller,
                      const DrawImage& draw_image,
                      uint64_t source_prepare_tiles_id)
      : controller_(controller),
        image_(draw_image),
        image_ref_(skia::SharePtr(draw_image.image())),
        source_prepare_tiles_id_(source_prepare_tiles_id) {
    DCHECK(!SkipImage(draw_image));
  }

  // Overridden from Task:
  void RunOnWorkerThread() override {
    TRACE_EVENT2("cc", "ImageDecodeTaskImpl::RunOnWorkerThread", "mode", "gpu",
                 "source_prepare_tiles_id", source_prepare_tiles_id_);
    controller_->DecodeImage(image_);
  }

  // Overridden from TileTask:
  void ScheduleOnOriginThread(RasterBufferProvider* provider) override {}
  void CompleteOnOriginThread(RasterBufferProvider* provider) override {
    controller_->DecodeTaskCompleted(image_);
  }

 protected:
  ~ImageDecodeTaskImpl() override {}

 private:
  GpuImageDecodeController* controller_;
  DrawImage image_;
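  // Hold a ref to the image to ensure it stays alive while this task exists.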
  skia::RefPtr<const SkImage> image_ref_;
  const uint64_t source_prepare_tiles_id_;

  DISALLOW_COPY_AND_ASSIGN(ImageDecodeTaskImpl);
};

// Task which creates an image from decoded data. Typically this involves
// uploading data to the GPU, which requires this task be run on the non-
// concurrent thread.
class ImageUploadTaskImpl : public ImageDecodeTask {
 public:
  ImageUploadTaskImpl(GpuImageDecodeController* controller,
                      const DrawImage& draw_image,
                      scoped_refptr<ImageDecodeTask> decode_dependency,
                      uint64_t source_prepare_tiles_id)
      : ImageDecodeTask(std::move(decode_dependency)),
        controller_(controller),
        image_(draw_image),
        image_ref_(skia::SharePtr(draw_image.image())),
        source_prepare_tiles_id_(source_prepare_tiles_id) {
    DCHECK(!SkipImage(draw_image));
  }

  // Override from Task:
  void RunOnWorkerThread() override {
    TRACE_EVENT2("cc", "ImageUploadTaskImpl::RunOnWorkerThread", "mode", "gpu",
                 "source_prepare_tiles_id", source_prepare_tiles_id_);
    controller_->UploadImage(image_);
  }

  void ScheduleOnOriginThread(RasterBufferProvider* provider) override {}
  void CompleteOnOriginThread(RasterBufferProvider* provider) override {
    controller_->UploadTaskCompleted(image_);
  }

  // Override from ImageDecodeTask:
  bool SupportsConcurrentExecution() const override { return false; }

 protected:
  ~ImageUploadTaskImpl() override {}

 private:
  GpuImageDecodeController* controller_;
  DrawImage image_;
  skia::RefPtr<const SkImage> image_ref_;
  uint64_t source_prepare_tiles_id_;

  DISALLOW_COPY_AND_ASSIGN(ImageUploadTaskImpl);
};

GpuImageDecodeController::DecodedImageData::DecodedImageData()
    : ref_count(0), is_locked(false), decode_failure(false) {}

GpuImageDecodeController::DecodedImageData::~DecodedImageData() = default;

GpuImageDecodeController::UploadedImageData::UploadedImageData()
    : budgeted(false), ref_count(0) {}

GpuImageDecodeController::UploadedImageData::~UploadedImageData() = default;

GpuImageDecodeController::ImageData::ImageData(DecodedDataMode mode,
                                               size_t size)
    : mode(mode), size(size), is_at_raster(false) {}

GpuImageDecodeController::ImageData::~ImageData() = default;

GpuImageDecodeController::GpuImageDecodeController(ContextProvider* context,
                                                   ResourceFormat decode_format)
    : format_(decode_format),
      context_(context),
      context_threadsafe_proxy_(
          skia::AdoptRef(context->GrContext()->threadSafeProxy())),
      image_data_(ImageDataMRUCache::NO_AUTO_EVICT),
      cached_items_limit_(kMaxDiscardableItems),
      cached_bytes_limit_(kMaxGpuImageBytes),
      bytes_used_(0) {}

GpuImageDecodeController::~GpuImageDecodeController() {
  // SetShouldAggressivelyFreeResources will zero our limits and free all
  // outstanding image memory.
  SetShouldAggressivelyFreeResources(true);
}

bool GpuImageDecodeController::GetTaskForImageAndRef(
    const DrawImage& draw_image,
    uint64_t prepare_tiles_id,
    scoped_refptr<ImageDecodeTask>* task) {
  if (SkipImage(draw_image)) {
    *task = nullptr;
    return false;
  }

  base::AutoLock lock(lock_);
  const auto image_id = draw_image.image()->uniqueID();

  auto found = image_data_.Get(image_id);
  if (found != image_data_.end()) {
    ImageData* image_data = found->second.get();
    if (image_data->is_at_raster) {
      // Image is at-raster; this usage will be at-raster as well, so just
      // return.
      *task = nullptr;
      return false;
    }

    if (image_data->upload.image) {
      // The image is already uploaded, ref and return.
      RefImage(draw_image);
      *task = nullptr;
      return true;
    }
  }

  // We didn't have a pre-uploaded image, so we need an upload task. Try to find
  // an existing one.
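  // Note: operator[] inserts a null entry if none exists yet; it is populated
  // below when a new upload task is created.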
  scoped_refptr<ImageDecodeTask>& existing_task =
      pending_image_upload_tasks_[image_id];
  if (existing_task) {
    // We had an existing upload task, ref the image and return the task.
    RefImage(draw_image);
    *task = existing_task;
    return true;
  }

  // We will be creating a new upload task. If necessary, create a placeholder
  // ImageData to hold the result.
  std::unique_ptr<ImageData> new_data;
  ImageData* data;
  if (found == image_data_.end()) {
    new_data = CreateImageData(draw_image);
    data = new_data.get();
  } else {
    data = found->second.get();
  }

  // Ensure that the image we're about to decode/upload will fit in memory.
  if (!EnsureCapacity(data->size)) {
    // Image will not fit, do an at-raster decode.
    *task = nullptr;
    return false;
  }

  // If we had to create new image data, add it to our map now that we know it
  // will fit.
  if (new_data)
    found = image_data_.Put(image_id, std::move(new_data));

  // Ref the image and create upload and decode tasks. We will release this ref
  // in UploadTaskCompleted.
  RefImage(draw_image);
  existing_task = make_scoped_refptr(new ImageUploadTaskImpl(
      this, draw_image, GetImageDecodeTaskAndRef(draw_image, prepare_tiles_id),
      prepare_tiles_id));

  // Ref the image again - this ref is owned by the caller, and it is their
  // responsibility to release it by calling UnrefImage.
  RefImage(draw_image);
  *task = existing_task;
  return true;
}

void GpuImageDecodeController::UnrefImage(const DrawImage& draw_image) {
  base::AutoLock lock(lock_);
  UnrefImageInternal(draw_image);
}

DecodedDrawImage GpuImageDecodeController::GetDecodedImageForDraw(
    const DrawImage& draw_image) {
  // We are being called during raster. The context lock must already be
  // acquired by the caller.
  context_->GetLock()->AssertAcquired();

  if (SkipImage(draw_image))
    return DecodedDrawImage(nullptr, draw_image.filter_quality());

  TRACE_EVENT0("cc", "GpuImageDecodeController::GetDecodedImageForDraw");

  base::AutoLock lock(lock_);
  const uint32_t unique_id = draw_image.image()->uniqueID();
  auto found = image_data_.Peek(unique_id);
  if (found == image_data_.end()) {
    // We didn't find the image, create a new entry.
    auto data = CreateImageData(draw_image);
    found = image_data_.Put(unique_id, std::move(data));
  }

  ImageData* image_data = found->second.get();

  if (!image_data->upload.budgeted) {
    // If image data is not budgeted by this point, it is at-raster.
    image_data->is_at_raster = true;
  }

  // Ref the image and decode so that they stay alive while we are
  // decoding/uploading.
  RefImage(draw_image);
  RefImageDecode(draw_image);

  // We may or may not need to decode and upload the image we've found; the
  // following functions early-out if we have already decoded or uploaded.
  DecodeImageIfNecessary(draw_image, image_data);
  UploadImageIfNecessary(draw_image, image_data);
  // Unref the image decode, but not the image. The image ref will be released
  // in DrawWithImageFinished.
  UnrefImageDecode(draw_image);

  SkImage* image = image_data->upload.image.get();
  DCHECK(image || image_data->decode.decode_failure);

  DecodedDrawImage decoded_draw_image(image, draw_image.filter_quality());
  decoded_draw_image.set_at_raster_decode(image_data->is_at_raster);
  return decoded_draw_image;
}

void GpuImageDecodeController::DrawWithImageFinished(
    const DrawImage& draw_image,
    const DecodedDrawImage& decoded_draw_image) {
  // We are being called during raster. The context lock must already be
  // acquired by the caller.
  context_->GetLock()->AssertAcquired();

  if (SkipImage(draw_image))
    return;

  base::AutoLock lock(lock_);
  UnrefImageInternal(draw_image);

  // We are mid-draw and holding the context lock, ensure we clean up any
  // textures (especially at-raster), which may have just been marked for
  // deletion by UnrefImage.
  DeletePendingImages();
}

void GpuImageDecodeController::ReduceCacheUsage() {
  base::AutoLock lock(lock_);
  EnsureCapacity(0);
}

void GpuImageDecodeController::SetShouldAggressivelyFreeResources(
    bool aggressively_free_resources) {
  if (aggressively_free_resources) {
    ContextProvider::ScopedContextLock context_lock(context_);
    base::AutoLock lock(lock_);
    // We want to keep as little in our cache as possible. Set our memory limit
    // to zero and call EnsureCapacity() to clean up memory.
    cached_bytes_limit_ = 0;
    EnsureCapacity(0);

    // We are holding the context lock, so finish cleaning up deleted images
    // now.
    DeletePendingImages();
  } else {
    base::AutoLock lock(lock_);
    cached_bytes_limit_ = kMaxGpuImageBytes;
  }
}

void GpuImageDecodeController::DecodeImage(const DrawImage& draw_image) {
  base::AutoLock lock(lock_);
  auto found = image_data_.Peek(draw_image.image()->uniqueID());
  DCHECK(found != image_data_.end());
  DCHECK(!found->second->is_at_raster);
  DecodeImageIfNecessary(draw_image, found->second.get());
}

void GpuImageDecodeController::UploadImage(const DrawImage& draw_image) {
  ContextProvider::ScopedContextLock context_lock(context_);
  base::AutoLock lock(lock_);
  auto found = image_data_.Peek(draw_image.image()->uniqueID());
  DCHECK(found != image_data_.end());
  DCHECK(!found->second->is_at_raster);
  UploadImageIfNecessary(draw_image, found->second.get());
}

void GpuImageDecodeController::DecodeTaskCompleted(
    const DrawImage& draw_image) {
  base::AutoLock lock(lock_);
  // Decode task is complete, remove it from our list of pending tasks.
  pending_image_decode_tasks_.erase(draw_image.image()->uniqueID());

  // While the decode task is active, we keep a ref on the decoded data.
  // Release that ref now.
  UnrefImageDecode(draw_image);
}

void GpuImageDecodeController::UploadTaskCompleted(
    const DrawImage& draw_image) {
  base::AutoLock lock(lock_);
  // Upload task is complete, remove it from our list of pending tasks.
  pending_image_upload_tasks_.erase(draw_image.image()->uniqueID());

  // While the upload task is active, we keep a ref on both the image it will
  // be populating and the decode it needs to populate it. Release these refs
  // now.
  UnrefImageDecode(draw_image);
  UnrefImageInternal(draw_image);
}

// Checks whether a decode of the given image already exists. If not, returns a
// task to produce the requested decode.
scoped_refptr<ImageDecodeTask>
GpuImageDecodeController::GetImageDecodeTaskAndRef(const DrawImage& draw_image,
                                                   uint64_t prepare_tiles_id) {
  lock_.AssertAcquired();

  const uint32_t image_id = draw_image.image()->uniqueID();

  // This ref is kept alive while an upload task may need this decode. We
  // release this ref in UploadTaskCompleted.
  RefImageDecode(draw_image);

  auto found = image_data_.Peek(image_id);
  if (found != image_data_.end() && found->second->decode.is_locked) {
    // We should never be creating a decode task for an at-raster image.
    DCHECK(!found->second->is_at_raster);
    // We should never be creating a decode for an already-uploaded image.
    DCHECK(!found->second->upload.image);
    return nullptr;
  }

  // We didn't have an existing locked image, create a task to lock or decode.
  scoped_refptr<ImageDecodeTask>& existing_task =
      pending_image_decode_tasks_[image_id];
  if (!existing_task) {
    // Ref image decode and create a decode task. This ref will be released in
    // DecodeTaskCompleted.
    RefImageDecode(draw_image);
    existing_task = make_scoped_refptr(
        new ImageDecodeTaskImpl(this, draw_image, prepare_tiles_id));
  }
  return existing_task;
}

void GpuImageDecodeController::RefImageDecode(const DrawImage& draw_image) {
  lock_.AssertAcquired();
  auto found = image_data_.Peek(draw_image.image()->uniqueID());
  DCHECK(found != image_data_.end());
  ++found->second->decode.ref_count;
  RefCountChanged(found->second.get());
}

void GpuImageDecodeController::UnrefImageDecode(const DrawImage& draw_image) {
  lock_.AssertAcquired();
  auto found = image_data_.Peek(draw_image.image()->uniqueID());
  DCHECK(found != image_data_.end());
  DCHECK_GT(found->second->decode.ref_count, 0u);
  --found->second->decode.ref_count;
  RefCountChanged(found->second.get());
}

void GpuImageDecodeController::RefImage(const DrawImage& draw_image) {
  lock_.AssertAcquired();
  auto found = image_data_.Peek(draw_image.image()->uniqueID());
  DCHECK(found != image_data_.end());
  ++found->second->upload.ref_count;
  RefCountChanged(found->second.get());
}

void GpuImageDecodeController::UnrefImageInternal(const DrawImage& draw_image) {
  lock_.AssertAcquired();
  auto found = image_data_.Peek(draw_image.image()->uniqueID());
  DCHECK(found != image_data_.end());
  DCHECK_GT(found->second->upload.ref_count, 0u);
  --found->second->upload.ref_count;
  RefCountChanged(found->second.get());
}

// Called any time an image or decode ref count changes. Takes care of any
// necessary memory budget book-keeping and cleanup.
void GpuImageDecodeController::RefCountChanged(ImageData* image_data) {
  lock_.AssertAcquired();

  bool has_any_refs =
      image_data->upload.ref_count > 0 || image_data->decode.ref_count > 0;
  if (image_data->is_at_raster && !has_any_refs) {
    // We have an at-raster image which has reached zero refs. If it won't fit
    // in our cache, delete the image to allow it to fit.
    if (image_data->upload.image && !CanFitSize(image_data->size)) {
      images_pending_deletion_.push_back(std::move(image_data->upload.image));
      image_data->upload.image = nullptr;
    }

    // We now have an at-raster image which will fit in our cache. Convert it
    // to not-at-raster.
    image_data->is_at_raster = false;
    if (image_data->upload.image) {
      bytes_used_ += image_data->size;
      image_data->upload.budgeted = true;
    }
  }

  // If we have image refs on a non-at-raster image, it must be budgeted, as it
  // is either uploaded or pending upload.
  if (image_data->upload.ref_count > 0 && !image_data->upload.budgeted &&
      !image_data->is_at_raster) {
    // We should only be taking non-at-raster refs on images that fit in cache.
    DCHECK(CanFitSize(image_data->size));

    bytes_used_ += image_data->size;
    image_data->upload.budgeted = true;
  }

  // If we have no image refs on an image, it should only be budgeted if it has
  // an uploaded image. If no image exists (upload was cancelled), we should
  // un-budget the image.
  if (image_data->upload.ref_count == 0 && image_data->upload.budgeted &&
      !image_data->upload.image) {
    DCHECK_GE(bytes_used_, image_data->size);
    bytes_used_ -= image_data->size;
    image_data->upload.budgeted = false;
  }

  // If we have no decode refs on an image, we should unlock any locked
  // discardable memory.
  if (image_data->decode.ref_count == 0 && image_data->decode.is_locked) {
    DCHECK(image_data->decode.data);
    image_data->decode.data->Unlock();
    image_data->decode.is_locked = false;
  }
}

// Ensures that we can fit a new image of size |required_size| in our cache. In
// doing so, this function will free unreferenced image data as necessary to
// create room.
bool GpuImageDecodeController::EnsureCapacity(size_t required_size) {
  lock_.AssertAcquired();

  if (CanFitSize(required_size) && !ExceedsPreferredCount())
    return true;

  // While we are over memory or preferred item capacity, we iterate through
  // our set of cached image data in LRU order. For each image, we can do two
  // things: 1) We can free the uploaded image, reducing the memory usage of
  // the cache and 2) we can remove the entry entirely, reducing the count of
  // elements in the cache.
  for (auto it = image_data_.rbegin(); it != image_data_.rend();) {
    if (it->second->decode.ref_count != 0 ||
        it->second->upload.ref_count != 0) {
      ++it;
      continue;
    }

    // Current entry has no refs. Ensure it is not locked.
    DCHECK(!it->second->decode.is_locked);

    // If an image without refs is budgeted, it must have an associated image
    // upload.
    DCHECK(!it->second->upload.budgeted || it->second->upload.image);

    // Free the uploaded image if possible.
    if (it->second->upload.image) {
      DCHECK(it->second->upload.budgeted);
      DCHECK_GE(bytes_used_, it->second->size);
      bytes_used_ -= it->second->size;
      images_pending_deletion_.push_back(std::move(it->second->upload.image));
      it->second->upload.image = nullptr;
      it->second->upload.budgeted = false;
    }

    // Free the entire entry if necessary.
    if (ExceedsPreferredCount()) {
      it = image_data_.Erase(it);
    } else {
      ++it;
    }

    if (CanFitSize(required_size) && !ExceedsPreferredCount())
      return true;
  }

  // Preferred count is only used as a guideline when trimming the cache. Allow
  // new elements to be added as long as we are below our size limit.
  return CanFitSize(required_size);
}

bool GpuImageDecodeController::CanFitSize(size_t size) const {
  lock_.AssertAcquired();

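  // Use checked arithmetic so that adding |size| to |bytes_used_| cannot
  // silently overflow the comparison against the byte limit.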
  base::CheckedNumeric<uint32_t> new_size(bytes_used_);
  new_size += size;
  return new_size.IsValid() && new_size.ValueOrDie() <= cached_bytes_limit_;
}

bool GpuImageDecodeController::ExceedsPreferredCount() const {
  lock_.AssertAcquired();

  return image_data_.size() > cached_items_limit_;
}

void GpuImageDecodeController::DecodeImageIfNecessary(
    const DrawImage& draw_image,
    ImageData* image_data) {
  lock_.AssertAcquired();

  DCHECK_GT(image_data->decode.ref_count, 0u);

  if (image_data->decode.decode_failure) {
    // We have already tried and failed to decode this image. Don't try again.
    return;
  }

  if (image_data->upload.image) {
    // We already have an uploaded image, no reason to decode.
    return;
  }

  if (image_data->decode.data &&
      (image_data->decode.is_locked || image_data->decode.data->Lock())) {
    // We already decoded this, or we just needed to lock, early out.
    image_data->decode.is_locked = true;
    return;
  }

  TRACE_EVENT0("cc", "GpuImageDecodeController::DecodeImage");

  image_data->decode.data = nullptr;
  std::unique_ptr<base::DiscardableMemory> backing_memory;
  {
    base::AutoUnlock unlock(lock_);
    switch (image_data->mode) {
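      // CPU mode: decode the image pixels directly into discardable memory
      // with readPixels(); the upload step later wraps this memory in a
      // raster-backed SkImage.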
      case DecodedDataMode::CPU: {
        backing_memory =
            base::DiscardableMemoryAllocator::GetInstance()
                ->AllocateLockedDiscardableMemory(image_data->size);
        SkImageInfo image_info = CreateImageInfoForDrawImage(draw_image);
        if (!draw_image.image()->readPixels(image_info, backing_memory->data(),
                                            image_info.minRowBytes(), 0, 0,
                                            SkImage::kDisallow_CachingHint)) {
          backing_memory.reset();
        }
        break;
      }
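      // GPU mode: have Skia generate deferred texture image data into
      // discardable memory via the thread-safe GrContext proxy; the upload
      // step later turns this into a texture-backed SkImage.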
      case DecodedDataMode::GPU: {
        backing_memory =
            base::DiscardableMemoryAllocator::GetInstance()
                ->AllocateLockedDiscardableMemory(image_data->size);
        auto params = ParamsFromDrawImage(draw_image);
        if (!draw_image.image()->getDeferredTextureImageData(
                *context_threadsafe_proxy_.get(), &params, 1,
                backing_memory->data())) {
          backing_memory.reset();
        }
        break;
      }
    }
  }

  if (image_data->decode.data) {
    // An at-raster task decoded this before us. Ignore our decode.
    return;
  }

  if (!backing_memory) {
    // If |backing_memory| was not populated, we had a non-decodable image.
    image_data->decode.decode_failure = true;
    return;
  }

  image_data->decode.data = std::move(backing_memory);
  DCHECK(!image_data->decode.is_locked);
  image_data->decode.is_locked = true;
}

void GpuImageDecodeController::UploadImageIfNecessary(
    const DrawImage& draw_image,
    ImageData* image_data) {
  context_->GetLock()->AssertAcquired();
  lock_.AssertAcquired();

  if (image_data->decode.decode_failure) {
    // We were unable to decode this image. Don't try to upload.
    return;
  }

  if (image_data->upload.image) {
    // Someone has uploaded this image before us (at raster).
    return;
  }

  TRACE_EVENT0("cc", "GpuImageDecodeController::UploadImage");
  DCHECK(image_data->decode.is_locked);
  DCHECK_GT(image_data->decode.ref_count, 0u);
  DCHECK_GT(image_data->upload.ref_count, 0u);

  // We are about to upload a new image and are holding the context lock.
  // Ensure that any images which have been marked for deletion are actually
  // cleaned up so we don't exceed our memory limit during this upload.
  DeletePendingImages();

  skia::RefPtr<SkImage> uploaded_image;
  {
    base::AutoUnlock unlock(lock_);
    switch (image_data->mode) {
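      // CPU mode: wrap the already-decoded pixels held in discardable memory
      // in a raster SkImage without copying; the empty release proc leaves
      // ownership of the pixels with |image_data->decode.data|.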
      case DecodedDataMode::CPU: {
        SkImageInfo image_info = CreateImageInfoForDrawImage(draw_image);
        uploaded_image = skia::AdoptRef(SkImage::NewFromRaster(
            image_info, image_data->decode.data->data(),
            image_info.minRowBytes(), [](const void*, void*) {}, nullptr));
        break;
      }
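      // GPU mode: build the texture-backed SkImage from the deferred texture
      // data produced during decode. This is the step that performs the
      // actual GPU upload, which is why the context lock must be held.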
      case DecodedDataMode::GPU: {
        uploaded_image =
            skia::AdoptRef(SkImage::NewFromDeferredTextureImageData(
                context_->GrContext(), image_data->decode.data->data(),
                SkBudgeted::kNo));
        break;
      }
    }
  }
  DCHECK(uploaded_image);

  // An at-raster task may have uploaded this image while we were unlocked. If
  // so, ignore our result.
  if (!image_data->upload.image) {
    image_data->upload.image = std::move(uploaded_image);
  }
}

std::unique_ptr<GpuImageDecodeController::ImageData>
GpuImageDecodeController::CreateImageData(const DrawImage& draw_image) {
  lock_.AssertAcquired();

  DecodedDataMode mode;
  SkImageInfo info = CreateImageInfoForDrawImage(draw_image);
  SkImage::DeferredTextureImageUsageParams params =
      ParamsFromDrawImage(draw_image);
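  // Query the required deferred-texture storage size by passing a null
  // destination buffer. A size of zero means the image can't be handled in
  // GPU mode, so we fall back to a CPU decode below.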
  size_t data_size = draw_image.image()->getDeferredTextureImageData(
      *context_threadsafe_proxy_.get(), &params, 1, nullptr);

  if (data_size == 0) {
    // Can't upload image, too large or other failure. Try to use SW fallback.
    data_size = info.getSafeSize(info.minRowBytes());
    mode = DecodedDataMode::CPU;
  } else {
    mode = DecodedDataMode::GPU;
  }

  return base::WrapUnique(new ImageData(mode, data_size));
}

void GpuImageDecodeController::DeletePendingImages() {
  context_->GetLock()->AssertAcquired();
  lock_.AssertAcquired();
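  // Dropping these refs destroys the SkImages, which frees any GPU textures
  // they own; that is why the context lock must be held here.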
  images_pending_deletion_.clear();
}

SkImageInfo GpuImageDecodeController::CreateImageInfoForDrawImage(
    const DrawImage& draw_image) const {
  return SkImageInfo::Make(
      draw_image.image()->width(), draw_image.image()->height(),
      ResourceFormatToClosestSkColorType(format_), kPremul_SkAlphaType);
}

}  // namespace cc