skia_gpu_object.h

// Copyright 2013 The Flutter Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef FLUTTER_FLOW_SKIA_GPU_OBJECT_H_
#define FLUTTER_FLOW_SKIA_GPU_OBJECT_H_

#include <deque>
#include <mutex>
#include <queue>
#include <utility>

#include "flutter/fml/memory/ref_counted.h"
#include "flutter/fml/memory/weak_ptr.h"
#include "flutter/fml/task_runner.h"
#include "third_party/skia/include/core/SkRefCnt.h"
#include "third_party/skia/include/gpu/GrDirectContext.h"

namespace flutter {

// A queue that holds Skia objects that must be destructed on the given task
// runner.
class SkiaUnrefQueue : public fml::RefCountedThreadSafe<SkiaUnrefQueue> {
 public:
  void Unref(SkRefCnt* object);

  // Usually, the drain is called automatically. However, during IO manager
  // shutdown (when the platform side reference to the OpenGL context is about
  // to go away), we may need to pre-emptively drain the unref queue. It is the
  // responsibility of the caller to ensure that no further unrefs are queued
  // after this call.
  void Drain();

 private:
  const fml::RefPtr<fml::TaskRunner> task_runner_;
  const fml::TimeDelta drain_delay_;
  std::mutex mutex_;
  std::deque<SkRefCnt*> objects_;
  bool drain_pending_;
  fml::WeakPtr<GrDirectContext> context_;

  // The `GrDirectContext* context` is only used for signaling Skia to
  // performDeferredCleanup. It can be nullptr when such signaling is not
  // needed (e.g., in unit tests).
  SkiaUnrefQueue(fml::RefPtr<fml::TaskRunner> task_runner,
                 fml::TimeDelta delay,
                 fml::WeakPtr<GrDirectContext> context = {});

  ~SkiaUnrefQueue();

  FML_FRIEND_REF_COUNTED_THREAD_SAFE(SkiaUnrefQueue);
  FML_FRIEND_MAKE_REF_COUNTED(SkiaUnrefQueue);
  FML_DISALLOW_COPY_AND_ASSIGN(SkiaUnrefQueue);
};
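
// Illustrative sketch, not part of the original header: one plausible way to
// create the queue and hand a Skia object to it, assuming the caller owns a
// suitable task runner and (optionally) a GrDirectContext. The 8ms drain delay
// and the Example* helper names are assumptions for illustration only.
inline fml::RefPtr<SkiaUnrefQueue> ExampleMakeUnrefQueue(
    fml::RefPtr<fml::TaskRunner> task_runner,
    fml::WeakPtr<GrDirectContext> context = {}) {
  // The private constructor is reachable through fml::MakeRefCounted because
  // of FML_FRIEND_MAKE_REF_COUNTED above.
  return fml::MakeRefCounted<SkiaUnrefQueue>(
      std::move(task_runner), fml::TimeDelta::FromMilliseconds(8),
      std::move(context));
}

// Illustrative sketch: transferring ownership of a Skia reference to the
// queue. The final unref happens on the queue's task runner (or immediately
// during an explicit Drain()), not on the calling thread.
inline void ExampleUnrefOnQueue(const fml::RefPtr<SkiaUnrefQueue>& queue,
                                sk_sp<SkRefCnt> object) {
  queue->Unref(object.release());
}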

/// An object whose deallocation needs to be performed on a specific unref
/// queue. The template argument T is the Skia object type being held.
template <class T>
class SkiaGPUObject {
 public:
  using SkiaObjectType = T;

  SkiaGPUObject() = default;
  SkiaGPUObject(sk_sp<SkiaObjectType> object, fml::RefPtr<SkiaUnrefQueue> queue)
      : object_(std::move(object)), queue_(std::move(queue)) {
    FML_DCHECK(object_);
  }
  SkiaGPUObject(SkiaGPUObject&&) = default;
  ~SkiaGPUObject() { reset(); }

  SkiaGPUObject& operator=(SkiaGPUObject&&) = default;

  sk_sp<SkiaObjectType> skia_object() const { return object_; }

  // Hands the held object (if any) to the unref queue so its final unref runs
  // on the queue's task runner, then drops the reference to the queue.
  void reset() {
    if (object_ && queue_) {
      queue_->Unref(object_.release());
    }
    queue_ = nullptr;
    FML_DCHECK(object_ == nullptr);
  }

 private:
  sk_sp<SkiaObjectType> object_;
  fml::RefPtr<SkiaUnrefQueue> queue_;

  FML_DISALLOW_COPY_AND_ASSIGN(SkiaGPUObject);
};
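
// Illustrative sketch, not part of the original header: wrapping an sk_sp so
// that when the wrapper is destroyed (possibly on a non-raster thread), the
// held Skia object is unref'ed on the queue's task runner via reset() rather
// than on the current thread. The function name is hypothetical.
template <class T>
SkiaGPUObject<T> ExampleWrapForDeferredUnref(sk_sp<T> object,
                                             fml::RefPtr<SkiaUnrefQueue> queue) {
  // ~SkiaGPUObject() calls reset(), which forwards the raw pointer to
  // SkiaUnrefQueue::Unref().
  return SkiaGPUObject<T>(std::move(object), std::move(queue));
}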

}  // namespace flutter

#endif  // FLUTTER_FLOW_SKIA_GPU_OBJECT_H_