author     Robert Phillips <robertphillips@google.com>    2018-02-07 17:08:21 -0500
committer  Skia Commit-Bot <skia-commit-bot@chromium.org>  2018-02-08 12:36:29 +0000
commit     4150eea6c49ecec882a8d3e1c61d6a25fcd1e905 (patch)
tree       21b7089d6745f769be88f8f3d9a127d521ff48be /src
parent     1f1bb9c0b8d5f50ac74716e6961a6c92f1d373d8 (diff)
Move control of explicit GPU resource allocation to GrContextOptions
Change-Id: Ic284acc79bab5936f0007d5ae5fb1e7a9929e2af
Reviewed-on: https://skia-review.googlesource.com/104880
Commit-Queue: Robert Phillips <robertphillips@google.com>
Reviewed-by: Brian Salomon <bsalomon@google.com>
Reviewed-by: Greg Daniel <egdaniel@google.com>
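For context: the two GrContextOptions fields this CL consumes (options.fExplicitlyAllocateGPUResources and options.fSortRenderTargets, both read in the GrContext.cpp hunk below) are client-settable flags. A minimal sketch of building such options, assuming both fields are public bools as the diff suggests; illustrative only, not part of this change:

    // Illustrative: constructs options enabling both behaviors this CL plumbs.
    #include "GrContextOptions.h"

    GrContextOptions MakeOptionsForExplicitAllocation() {
        GrContextOptions options;
        // Read in GrContext::init() and forwarded to GrResourceProvider:
        options.fExplicitlyAllocateGPUResources = true;
        // Read in GrContext::init() and forwarded to GrDrawingManager:
        options.fSortRenderTargets = true;
        return options;
    }

The resulting options would be handed to whichever GrContext factory the client uses at context creation.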
Diffstat (limited to 'src')
-rw-r--r--  src/core/SkTTopoSort.h                 | 31
-rw-r--r--  src/gpu/GrContext.cpp                  |  7
-rw-r--r--  src/gpu/GrDrawingManager.cpp           | 41
-rw-r--r--  src/gpu/GrDrawingManager.h             |  8
-rw-r--r--  src/gpu/GrOpList.cpp                   | 31
-rw-r--r--  src/gpu/GrRenderTargetContext.cpp      | 14
-rw-r--r--  src/gpu/GrResourceAllocator.cpp        | 24
-rw-r--r--  src/gpu/GrResourceAllocator.h          |  1
-rw-r--r--  src/gpu/GrResourceProvider.cpp         |  6
-rw-r--r--  src/gpu/GrResourceProvider.h           |  7
-rw-r--r--  src/gpu/GrTextureRenderTargetProxy.cpp |  3
11 files changed, 99 insertions(+), 74 deletions(-)
diff --git a/src/core/SkTTopoSort.h b/src/core/SkTTopoSort.h
index 21c80696e7..722707d5b5 100644
--- a/src/core/SkTTopoSort.h
+++ b/src/core/SkTTopoSort.h
@@ -8,22 +8,23 @@
#ifndef SkTTopoSort_DEFINED
#define SkTTopoSort_DEFINED
-#include "SkTDArray.h"
+#include "SkRefCnt.h"
+#include "SkTArray.h"
#ifdef SK_DEBUG
template <typename T, typename Traits = T>
-void SkTTopoSort_CheckAllUnmarked(const SkTDArray<T*>& graph) {
+void SkTTopoSort_CheckAllUnmarked(const SkTArray<sk_sp<T>>& graph) {
for (int i = 0; i < graph.count(); ++i) {
- SkASSERT(!Traits::IsTempMarked(graph[i]));
- SkASSERT(!Traits::WasOutput(graph[i]));
+ SkASSERT(!Traits::IsTempMarked(graph[i].get()));
+ SkASSERT(!Traits::WasOutput(graph[i].get()));
}
}
template <typename T, typename Traits = T>
-void SkTTopoSort_CleanExit(const SkTDArray<T*>& graph) {
+void SkTTopoSort_CleanExit(const SkTArray<sk_sp<T>>& graph) {
for (int i = 0; i < graph.count(); ++i) {
- SkASSERT(!Traits::IsTempMarked(graph[i]));
- SkASSERT(Traits::WasOutput(graph[i]));
+ SkASSERT(!Traits::IsTempMarked(graph[i].get()));
+ SkASSERT(Traits::WasOutput(graph[i].get()));
}
}
#endif
@@ -31,7 +32,7 @@ void SkTTopoSort_CleanExit(const SkTDArray<T*>& graph) {
// Recursively visit a node and all the other nodes it depends on.
// Return false if there is a loop.
template <typename T, typename Traits = T>
-bool SkTTopoSort_Visit(T* node, SkTDArray<T*>* result) {
+bool SkTTopoSort_Visit(T* node, SkTArray<sk_sp<T>>* result) {
if (Traits::IsTempMarked(node)) {
// There is a loop.
return false;
@@ -51,7 +52,7 @@ bool SkTTopoSort_Visit(T* node, SkTDArray<T*>* result) {
Traits::Output(node, result->count()); // mark this node as output
Traits::ResetTempMark(node);
- *result->append() = node;
+ result->push_back(sk_ref_sp(node));
}
return true;
@@ -78,30 +79,30 @@ bool SkTTopoSort_Visit(T* node, SkTDArray<T*>* result) {
// node and all the nodes on which it depends. This could be used to partially
// flush a GrOpList DAG.
template <typename T, typename Traits = T>
-bool SkTTopoSort(SkTDArray<T*>* graph) {
- SkTDArray<T*> result;
+bool SkTTopoSort(SkTArray<sk_sp<T>>* graph) {
+ SkTArray<sk_sp<T>> result;
#ifdef SK_DEBUG
SkTTopoSort_CheckAllUnmarked<T, Traits>(*graph);
#endif
- result.setReserve(graph->count());
+ result.reserve(graph->count());
for (int i = 0; i < graph->count(); ++i) {
- if (Traits::WasOutput((*graph)[i])) {
+ if (Traits::WasOutput((*graph)[i].get())) {
// This node was depended on by some earlier node and has already
// been output
continue;
}
// Output this node after all the nodes it depends on have been output.
- if (!SkTTopoSort_Visit<T, Traits>((*graph)[i], &result)) {
+ if (!SkTTopoSort_Visit<T, Traits>((*graph)[i].get(), &result)) {
return false;
}
}
SkASSERT(graph->count() == result.count());
- graph->swap(result);
+ graph->swap(&result);
#ifdef SK_DEBUG
SkTTopoSort_CleanExit<T, Traits>(*graph);
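For reference, the sort itself is unchanged by this CL: it is a standard depth-first topological sort that temp-marks nodes on the DFS stack to detect cycles and emits each node only after its dependencies. The diff above only changes ownership, so the sorted array now holds sk_sp refs rather than bare pointers. A standalone sketch of the same visit protocol, with std::shared_ptr standing in for sk_sp (illustrative, not Skia code):

    #include <memory>
    #include <vector>

    struct Node : std::enable_shared_from_this<Node> {
        std::vector<std::shared_ptr<Node>> dependencies;
        bool tempMarked = false;  // on the current DFS stack
        bool wasOutput  = false;  // already emitted to the result

        // Emit this node after everything it depends on; false means a loop,
        // mirroring SkTTopoSort_Visit above.
        bool visit(std::vector<std::shared_ptr<Node>>* result) {
            if (this->tempMarked) {
                return false;  // back edge => cycle
            }
            if (!this->wasOutput) {
                this->tempMarked = true;
                for (auto& dep : this->dependencies) {
                    if (!dep->visit(result)) {
                        return false;
                    }
                }
                this->wasOutput  = true;
                this->tempMarked = false;
                result->push_back(this->shared_from_this());
            }
            return true;
        }
    };

    // Reorder 'graph' so every node follows its dependencies, as SkTTopoSort does.
    bool TopoSort(std::vector<std::shared_ptr<Node>>* graph) {
        std::vector<std::shared_ptr<Node>> result;
        result.reserve(graph->size());
        for (auto& node : *graph) {
            if (node->wasOutput) {
                continue;  // already emitted as a dependency of an earlier node
            }
            if (!node->visit(&result)) {
                return false;
            }
        }
        graph->swap(result);
        return true;
    }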
diff --git a/src/gpu/GrContext.cpp b/src/gpu/GrContext.cpp
index 536ad07d2b..dce59f367c 100644
--- a/src/gpu/GrContext.cpp
+++ b/src/gpu/GrContext.cpp
@@ -239,7 +239,8 @@ bool GrContext::init(const GrContextOptions& options) {
if (fGpu) {
fCaps = fGpu->refCaps();
fResourceCache = new GrResourceCache(fCaps.get(), fUniqueID);
- fResourceProvider = new GrResourceProvider(fGpu.get(), fResourceCache, &fSingleOwner);
+ fResourceProvider = new GrResourceProvider(fGpu.get(), fResourceCache, &fSingleOwner,
+ options.fExplicitlyAllocateGPUResources);
}
fProxyProvider = new GrProxyProvider(fResourceProvider, fResourceCache, fCaps, &fSingleOwner);
@@ -282,8 +283,8 @@ bool GrContext::init(const GrContextOptions& options) {
}
#endif
- fDrawingManager.reset(
- new GrDrawingManager(this, prcOptions, atlasTextContextOptions, &fSingleOwner));
+ fDrawingManager.reset(new GrDrawingManager(this, prcOptions, atlasTextContextOptions,
+ &fSingleOwner, options.fSortRenderTargets));
GrDrawOpAtlas::AllowMultitexturing allowMultitexturing;
if (GrContextOptions::Enable::kNo == options.fAllowMultipleGlyphCacheTextures ||
diff --git a/src/gpu/GrDrawingManager.cpp b/src/gpu/GrDrawingManager.cpp
index ef4177f0f1..703bc0a92d 100644
--- a/src/gpu/GrDrawingManager.cpp
+++ b/src/gpu/GrDrawingManager.cpp
@@ -115,11 +115,10 @@ GrSemaphoresSubmitted GrDrawingManager::internalFlush(GrSurfaceProxy*,
}
#endif
-#ifndef SK_DISABLE_RENDER_TARGET_SORTING
- SkDEBUGCODE(bool result =)
- SkTTopoSort<GrOpList, GrOpList::TopoSortTraits>(&fOpLists);
- SkASSERT(result);
-#endif
+ if (fSortRenderTargets) {
+ SkDEBUGCODE(bool result =) SkTTopoSort<GrOpList, GrOpList::TopoSortTraits>(&fOpLists);
+ SkASSERT(result);
+ }
GrGpu* gpu = fContext->contextPriv().getGpu();
@@ -179,21 +178,14 @@ GrSemaphoresSubmitted GrDrawingManager::internalFlush(GrSurfaceProxy*,
alloc.markEndOfOpList(i);
}
-#ifdef SK_DISABLE_EXPLICIT_GPU_RESOURCE_ALLOCATION
- startIndex = 0;
- stopIndex = fOpLists.count();
-#else
GrResourceAllocator::AssignError error = GrResourceAllocator::AssignError::kNoError;
- while (alloc.assign(&startIndex, &stopIndex, &error))
-#endif
- {
-#ifndef SK_DISABLE_EXPLICIT_GPU_RESOURCE_ALLOCATION
+ while (alloc.assign(&startIndex, &stopIndex, &error)) {
if (GrResourceAllocator::AssignError::kFailedProxyInstantiation == error) {
for (int i = startIndex; i < stopIndex; ++i) {
fOpLists[i]->purgeOpsWithUninstantiatedProxies();
}
}
-#endif
+
if (this->executeOpLists(startIndex, stopIndex, &flushState)) {
flushed = true;
}
@@ -221,6 +213,7 @@ GrSemaphoresSubmitted GrDrawingManager::internalFlush(GrSurfaceProxy*,
bool GrDrawingManager::executeOpLists(int startIndex, int stopIndex, GrOpFlushState* flushState) {
SkASSERT(startIndex <= stopIndex && stopIndex <= fOpLists.count());
+ GrResourceProvider* resourceProvider = fContext->contextPriv().resourceProvider();
bool anyOpListsExecuted = false;
for (int i = startIndex; i < stopIndex; ++i) {
@@ -228,15 +221,19 @@ bool GrDrawingManager::executeOpLists(int startIndex, int stopIndex, GrOpFlushSt
continue;
}
-#ifdef SK_DISABLE_EXPLICIT_GPU_RESOURCE_ALLOCATION
- if (!fOpLists[i]->instantiate(fContext->contextPriv().resourceProvider())) {
- SkDebugf("OpList failed to instantiate.\n");
- fOpLists[i] = nullptr;
- continue;
+ if (resourceProvider->explicitlyAllocateGPUResources()) {
+ if (!fOpLists[i]->isInstantiated()) {
+ // If the backing surface wasn't allocated drop the draw of the entire opList.
+ fOpLists[i] = nullptr;
+ continue;
+ }
+ } else {
+ if (!fOpLists[i]->instantiate(resourceProvider)) {
+ SkDebugf("OpList failed to instantiate.\n");
+ fOpLists[i] = nullptr;
+ continue;
+ }
}
-#else
- SkASSERT(fOpLists[i]->isInstantiated());
-#endif
// TODO: handle this instantiation via lazy surface proxies?
// Instantiate all deferred proxies (being built on worker threads) so we can upload them
diff --git a/src/gpu/GrDrawingManager.h b/src/gpu/GrDrawingManager.h
index 58b755f005..200d0cafce 100644
--- a/src/gpu/GrDrawingManager.h
+++ b/src/gpu/GrDrawingManager.h
@@ -86,7 +86,8 @@ private:
GrDrawingManager(GrContext* context,
const GrPathRendererChain::Options& optionsForPathRendererChain,
const GrAtlasTextContext::Options& optionsForAtlasTextContext,
- GrSingleOwner* singleOwner)
+ GrSingleOwner* singleOwner,
+ bool sortRenderTargets)
: fContext(context)
, fOptionsForPathRendererChain(optionsForPathRendererChain)
, fOptionsForAtlasTextContext(optionsForAtlasTextContext)
@@ -95,7 +96,9 @@ private:
, fAtlasTextContext(nullptr)
, fPathRendererChain(nullptr)
, fSoftwarePathRenderer(nullptr)
- , fFlushing(false) {}
+ , fFlushing(false)
+ , fSortRenderTargets(sortRenderTargets) {
+ }
void abandon();
void cleanup();
@@ -142,6 +145,7 @@ private:
GrTokenTracker fTokenTracker;
bool fFlushing;
+ bool fSortRenderTargets;
SkTArray<GrOnFlushCallbackObject*> fOnFlushCBObjects;
};
diff --git a/src/gpu/GrOpList.cpp b/src/gpu/GrOpList.cpp
index b63e96ca26..bdaaa2ab4a 100644
--- a/src/gpu/GrOpList.cpp
+++ b/src/gpu/GrOpList.cpp
@@ -32,15 +32,16 @@ GrOpList::GrOpList(GrResourceProvider* resourceProvider,
fTarget.setProxy(sk_ref_sp(surfaceProxy), kWrite_GrIOType);
fTarget.get()->setLastOpList(this);
-#ifdef SK_DISABLE_EXPLICIT_GPU_RESOURCE_ALLOCATION
- // MDB TODO: remove this! We are currently moving to having all the ops that target
- // the RT as a dest (e.g., clear, etc.) rely on the opList's 'fTarget' pointer
- // for the IO Ref. This works well but until they are all swapped over (and none
- // are pre-emptively instantiating proxies themselves) we need to instantiate
- // here so that the GrSurfaces are created in an order that preserves the GrSurface
- // re-use assumptions.
- fTarget.get()->instantiate(resourceProvider);
-#endif
+ if (resourceProvider && !resourceProvider->explicitlyAllocateGPUResources()) {
+ // MDB TODO: remove this! We are currently moving to having all the ops that target
+ // the RT as a dest (e.g., clear, etc.) rely on the opList's 'fTarget' pointer
+ // for the IO Ref. This works well but until they are all swapped over (and none
+ // are pre-emptively instantiating proxies themselves) we need to instantiate
+ // here so that the GrSurfaces are created in an order that preserves the GrSurface
+ // re-use assumptions.
+ fTarget.get()->instantiate(resourceProvider);
+ }
+
fTarget.markPendingIO();
}
@@ -67,11 +68,11 @@ void GrOpList::endFlush() {
void GrOpList::instantiateDeferredProxies(GrResourceProvider* resourceProvider) {
for (int i = 0; i < fDeferredProxies.count(); ++i) {
-#ifdef SK_DISABLE_EXPLICIT_GPU_RESOURCE_ALLOCATION
- fDeferredProxies[i]->instantiate(resourceProvider);
-#else
- SkASSERT(fDeferredProxies[i]->priv().isInstantiated());
-#endif
+ if (resourceProvider->explicitlyAllocateGPUResources()) {
+ SkASSERT(fDeferredProxies[i]->priv().isInstantiated());
+ } else {
+ fDeferredProxies[i]->instantiate(resourceProvider);
+ }
}
}
@@ -118,11 +119,11 @@ void GrOpList::addDependency(GrSurfaceProxy* dependedOn, const GrCaps& caps) {
}
}
-#ifdef SK_DEBUG
bool GrOpList::isInstantiated() const {
return fTarget.get()->priv().isInstantiated();
}
+#ifdef SK_DEBUG
void GrOpList::dump() const {
SkDebugf("--------------------------------------------------------------\n");
SkDebugf("node: %d -> RT: %d\n", fUniqueID, fTarget.get() ? fTarget.get()->uniqueID().asUInt()
diff --git a/src/gpu/GrRenderTargetContext.cpp b/src/gpu/GrRenderTargetContext.cpp
index d4cfb7f0a6..cb7b677759 100644
--- a/src/gpu/GrRenderTargetContext.cpp
+++ b/src/gpu/GrRenderTargetContext.cpp
@@ -155,12 +155,14 @@ GrRenderTargetContext::GrRenderTargetContext(GrContext* context,
, fOpList(sk_ref_sp(fRenderTargetProxy->getLastRenderTargetOpList()))
, fSurfaceProps(SkSurfacePropsCopyOrDefault(surfaceProps))
, fManagedOpList(managedOpList) {
-#ifdef SK_DISABLE_EXPLICIT_GPU_RESOURCE_ALLOCATION
- // MDB TODO: to ensure all resources still get allocated in the correct order in the hybrid
- // world we need to get the correct opList here so that it, in turn, can grab and hold
- // its rendertarget.
- this->getRTOpList();
-#endif
+ GrResourceProvider* resourceProvider = context->contextPriv().resourceProvider();
+ if (resourceProvider && !resourceProvider->explicitlyAllocateGPUResources()) {
+ // MDB TODO: to ensure all resources still get allocated in the correct order in the hybrid
+ // world we need to get the correct opList here so that it, in turn, can grab and hold
+ // its rendertarget.
+ this->getRTOpList();
+ }
+
fTextTarget.reset(new TextTarget(this));
SkDEBUGCODE(this->validate();)
}
diff --git a/src/gpu/GrResourceAllocator.cpp b/src/gpu/GrResourceAllocator.cpp
index 6ed3d4dc43..f41169c110 100644
--- a/src/gpu/GrResourceAllocator.cpp
+++ b/src/gpu/GrResourceAllocator.cpp
@@ -36,11 +36,9 @@ void GrResourceAllocator::markEndOfOpList(int opListIndex) {
}
GrResourceAllocator::~GrResourceAllocator() {
-#ifndef SK_DISABLE_EXPLICIT_GPU_RESOURCE_ALLOCATION
SkASSERT(fIntvlList.empty());
SkASSERT(fActiveIntvls.empty());
SkASSERT(!fIntvlHash.count());
-#endif
}
void GrResourceAllocator::addInterval(GrSurfaceProxy* proxy, unsigned int start, unsigned int end
@@ -79,12 +77,12 @@ void GrResourceAllocator::addInterval(GrSurfaceProxy* proxy, unsigned int start,
fIntvlList.insertByIncreasingStart(newIntvl);
fIntvlHash.add(newIntvl);
-#ifdef SK_DISABLE_EXPLICIT_GPU_RESOURCE_ALLOCATION
- // FIXME: remove this once we can do the lazy instantiation from assign instead.
- if (GrSurfaceProxy::LazyState::kNot != proxy->lazyInstantiationState()) {
- proxy->priv().doLazyInstantiation(fResourceProvider);
+ if (!fResourceProvider->explicitlyAllocateGPUResources()) {
+ // FIXME: remove this once we can do the lazy instantiation from assign instead.
+ if (GrSurfaceProxy::LazyState::kNot != proxy->lazyInstantiationState()) {
+ proxy->priv().doLazyInstantiation(fResourceProvider);
+ }
}
-#endif
}
GrResourceAllocator::Interval* GrResourceAllocator::IntervalList::popHead() {
@@ -131,6 +129,13 @@ void GrResourceAllocator::IntervalList::insertByIncreasingEnd(Interval* intvl) {
}
}
+
+ GrResourceAllocator::Interval* GrResourceAllocator::IntervalList::detachAll() {
+ Interval* tmp = fHead;
+ fHead = nullptr;
+ return tmp;
+}
+
// 'surface' can be reused. Add it back to the free pool.
void GrResourceAllocator::freeUpSurface(sk_sp<GrSurface> surface) {
const GrScratchKey &key = surface->resourcePriv().getScratchKey();
@@ -207,6 +212,11 @@ bool GrResourceAllocator::assign(int* startIndex, int* stopIndex, AssignError* o
*startIndex = fCurOpListIndex;
*stopIndex = fEndOfOpListOpIndices.count();
+ if (!fResourceProvider->explicitlyAllocateGPUResources()) {
+ fIntvlList.detachAll(); // arena allocator will clean these up for us
+ return true;
+ }
+
SkDEBUGCODE(fAssigned = true;)
while (Interval* cur = fIntvlList.popHead()) {
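The assign() flow above behaves like linear-scan register allocation over op indices: each proxy owns a [start, end] interval, active intervals are expired in order of increasing end, and surfaces freed by expiry return to a pool for reuse. A compact standalone sketch of that reuse scheme (illustrative; the real allocator keys its free pool by GrScratchKey and instantiates actual GrSurfaces):

    #include <algorithm>
    #include <map>
    #include <vector>

    struct Interval { int proxyId; int start; int end; };

    // Assign each proxy a surface slot, recycling slots whose intervals have
    // expired -- the same shape as the popHead() / insertByIncreasingEnd() /
    // freeUpSurface() flow in GrResourceAllocator.
    std::map<int, int> AssignSurfaces(std::vector<Interval> intervals) {
        std::sort(intervals.begin(), intervals.end(),
                  [](const Interval& a, const Interval& b) { return a.start < b.start; });
        std::vector<Interval> active;   // kept sorted by increasing end
        std::vector<int> freePool;      // slots whose intervals have expired
        std::map<int, int> assignment;  // proxyId -> surface slot
        int nextSlot = 0;
        for (const Interval& cur : intervals) {
            // Expire every active interval that ends before 'cur' begins.
            while (!active.empty() && active.front().end < cur.start) {
                freePool.push_back(assignment[active.front().proxyId]);
                active.erase(active.begin());
            }
            int slot;
            if (!freePool.empty()) {
                slot = freePool.back();  // reuse a recycled surface slot
                freePool.pop_back();
            } else {
                slot = nextSlot++;       // nothing expired; allocate a fresh one
            }
            assignment[cur.proxyId] = slot;
            auto pos = std::lower_bound(
                    active.begin(), active.end(), cur,
                    [](const Interval& a, const Interval& b) { return a.end < b.end; });
            active.insert(pos, cur);
        }
        return assignment;
    }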
diff --git a/src/gpu/GrResourceAllocator.h b/src/gpu/GrResourceAllocator.h
index bbc577d47d..f25bef309e 100644
--- a/src/gpu/GrResourceAllocator.h
+++ b/src/gpu/GrResourceAllocator.h
@@ -166,6 +166,7 @@ private:
Interval* popHead();
void insertByIncreasingStart(Interval*);
void insertByIncreasingEnd(Interval*);
+ Interval* detachAll();
private:
Interval* fHead = nullptr;
diff --git a/src/gpu/GrResourceProvider.cpp b/src/gpu/GrResourceProvider.cpp
index ec94968e21..c9575b26dc 100644
--- a/src/gpu/GrResourceProvider.cpp
+++ b/src/gpu/GrResourceProvider.cpp
@@ -33,13 +33,15 @@ const uint32_t GrResourceProvider::kMinScratchTextureSize = 16;
#define ASSERT_SINGLE_OWNER \
SkDEBUGCODE(GrSingleOwner::AutoEnforce debug_SingleOwner(fSingleOwner);)
-GrResourceProvider::GrResourceProvider(GrGpu* gpu, GrResourceCache* cache, GrSingleOwner* owner)
+GrResourceProvider::GrResourceProvider(GrGpu* gpu, GrResourceCache* cache, GrSingleOwner* owner,
+ bool explicitlyAllocateGPUResources)
: fCache(cache)
, fGpu(gpu)
+ , fExplicitlyAllocateGPUResources(explicitlyAllocateGPUResources)
#ifdef SK_DEBUG
, fSingleOwner(owner)
#endif
- {
+{
fCaps = sk_ref_sp(fGpu->caps());
GR_DEFINE_STATIC_UNIQUE_KEY(gQuadIndexBufferKey);
diff --git a/src/gpu/GrResourceProvider.h b/src/gpu/GrResourceProvider.h
index 385282d6f6..7b5fb60ea1 100644
--- a/src/gpu/GrResourceProvider.h
+++ b/src/gpu/GrResourceProvider.h
@@ -39,7 +39,7 @@ class SkTypeface;
*/
class GrResourceProvider {
public:
- GrResourceProvider(GrGpu* gpu, GrResourceCache* cache, GrSingleOwner* owner);
+ GrResourceProvider(GrGpu*, GrResourceCache*, GrSingleOwner*, bool explicitlyAllocate);
/**
* Finds a resource in the cache, based on the specified key. Prior to calling this, the caller
@@ -258,6 +258,10 @@ public:
inline GrResourceProviderPriv priv();
inline const GrResourceProviderPriv priv() const;
+ bool explicitlyAllocateGPUResources() const { return fExplicitlyAllocateGPUResources; }
+
+ bool testingOnly_setExplicitlyAllocateGPUResources(bool newValue);
+
private:
sk_sp<GrGpuResource> findResourceByUniqueKey(const GrUniqueKey&);
@@ -297,6 +301,7 @@ private:
GrGpu* fGpu;
sk_sp<const GrCaps> fCaps;
GrUniqueKey fQuadIndexBufferKey;
+ bool fExplicitlyAllocateGPUResources;
// In debug builds we guard against improper thread handling
SkDEBUGCODE(mutable GrSingleOwner* fSingleOwner;)
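The header also declares testingOnly_setExplicitlyAllocateGPUResources(), whose definition is not part of this diff; presumably it lets unit tests flip the strategy on a live context. A hypothetical helper built on it, assuming the setter returns the previous value (that contract is a guess, as is the test body):

    // Hypothetical test scaffolding -- the setter's body is not in this CL,
    // and the "returns the old value" contract is assumed, not confirmed.
    #include "GrContext.h"
    #include "GrContextPriv.h"
    #include "GrResourceProvider.h"

    static void RunWithExplicitAllocation(GrContext* context,
                                          void (*testBody)(GrContext*)) {
        GrResourceProvider* provider = context->contextPriv().resourceProvider();
        bool oldValue = provider->testingOnly_setExplicitlyAllocateGPUResources(true);
        testBody(context);  // e.g., draws that stress surface reuse
        provider->testingOnly_setExplicitlyAllocateGPUResources(oldValue);
    }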
diff --git a/src/gpu/GrTextureRenderTargetProxy.cpp b/src/gpu/GrTextureRenderTargetProxy.cpp
index f1e47ec32b..3312077236 100644
--- a/src/gpu/GrTextureRenderTargetProxy.cpp
+++ b/src/gpu/GrTextureRenderTargetProxy.cpp
@@ -108,7 +108,8 @@ sk_sp<GrSurface> GrTextureRenderTargetProxy::createSurface(
void GrTextureRenderTargetProxy::validateLazySurface(const GrSurface* surface) {
// Anything checked here should also be checking the GrTextureProxy version
SkASSERT(surface->asTexture());
- SkASSERT(surface->asTexture()->texturePriv().mipMapped() == this->mipMapped());
+ SkASSERT(GrMipMapped::kNo == this->mipMapped() ||
+ GrMipMapped::kYes == surface->asTexture()->texturePriv().mipMapped());
// Anything checked here should also be checking the GrRenderTargetProxy version
SkASSERT(surface->asRenderTarget());