author		Chris Wilson <chris@chris-wilson.co.uk>	2019-03-08 09:36:56 +0000
committer	Chris Wilson <chris@chris-wilson.co.uk>	2019-03-08 10:57:08 +0000
commit		c6eeb4797eb94ad14bb34adfccbc6addad2cfd48 (patch)
tree		ec32a5bc5b8c8e3ce3abfb529eb0b5b8365e4ab7 /drivers/gpu/drm/i915/i915_gem.c
parent		604c37d76689d6a0e5492f5ff71886ab83817208 (diff)
drm/i915: Reduce presumption of request ordering for barriers
Currently we assume that we know the order in which requests run and so
can determine if we need to reissue a switch-to-kernel-context prior to
idling. That assumption does not hold for the future, so instead of
tracking which barriers have been used, simply record whether we have ever
switched away from the kernel context by using an engine. Before idling,
ensure that all engines that have been used since the last idle are
synchronously switched back to the kernel context, both for safety and
for the sake of shrinking memory while idle.
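A minimal sketch of the bookkeeping this implies follows (illustrative userspace C only, not the driver code; active_engines, note_user_submission and MAX_ENGINES are names invented for the sketch): each engine that runs a non-kernel context sets its bit in a mask, idling switches back only the engines whose bits are set, and callers that really want every engine pass an all-ones mask.

#include <stdint.h>
#include <stdio.h>

#define MAX_ENGINES 8
#define ALL_ENGINES ((uint32_t)~0u)	/* every engine bit set */

/* Bitmask of engines that ran a non-kernel context since the last idle. */
static uint32_t active_engines;

static void note_user_submission(unsigned int engine)
{
	active_engines |= 1u << engine;	/* remember this engine was used */
}

/* Synchronously switch every engine named in mask back to the kernel context. */
static void switch_to_kernel_context_sync(uint32_t mask)
{
	for (unsigned int engine = 0; engine < MAX_ENGINES; engine++) {
		if (mask & (1u << engine))
			printf("engine %u: switch to kernel context\n", engine);
	}
}

static void idle(void)
{
	/* Only the engines used since the last idle need the barrier. */
	switch_to_kernel_context_sync(active_engines);
	active_engines = 0;		/* reset tracking for the next cycle */
}

int main(void)
{
	note_user_submission(0);
	note_user_submission(2);
	idle();				/* touches engines 0 and 2 only */

	/* Forcing every engine, e.g. when (re)loading a power context. */
	switch_to_kernel_context_sync(ALL_ENGINES);
	return 0;
}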
v2: Use intel_engine_mask_t and ALL_ENGINES
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20190308093657.8640-3-chris@chris-wilson.co.uk
Diffstat (limited to 'drivers/gpu/drm/i915/i915_gem.c')
-rw-r--r--	drivers/gpu/drm/i915/i915_gem.c	12
1 file changed, 7 insertions(+), 5 deletions(-)
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 539ee78f6d9a..961237b90b40 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -2845,7 +2845,8 @@ static void assert_kernel_context_is_current(struct drm_i915_private *i915)
 	}
 }
 
-static bool switch_to_kernel_context_sync(struct drm_i915_private *i915)
+static bool switch_to_kernel_context_sync(struct drm_i915_private *i915,
+					  unsigned long mask)
 {
 	bool result = true;
 
@@ -2854,7 +2855,7 @@ static bool switch_to_kernel_context_sync(struct drm_i915_private *i915)
 	 * to save itself before we report the failure. Yes, this may be a
 	 * false positive due to e.g. ENOMEM, caveat emptor!
 	 */
-	if (i915_gem_switch_to_kernel_context(i915))
+	if (i915_gem_switch_to_kernel_context(i915, mask))
 		result = false;
 
 	if (i915_gem_wait_for_idle(i915,
@@ -2879,7 +2880,8 @@ static bool switch_to_kernel_context_sync(struct drm_i915_private *i915)
 
 static bool load_power_context(struct drm_i915_private *i915)
 {
-	if (!switch_to_kernel_context_sync(i915))
+	/* Force loading the kernel context on all engines */
+	if (!switch_to_kernel_context_sync(i915, ALL_ENGINES))
 		return false;
 
 	/*
@@ -2927,7 +2929,7 @@ i915_gem_idle_work_handler(struct work_struct *work)
 	    !i915->gt.active_requests) {
 		++i915->gt.active_requests; /* don't requeue idle */
 
-		switch_to_kernel_context_sync(i915);
+		switch_to_kernel_context_sync(i915, i915->gt.active_engines);
 
 		if (!--i915->gt.active_requests) {
 			__i915_gem_park(i915);
@@ -4380,7 +4382,7 @@ void i915_gem_suspend(struct drm_i915_private *i915)
 	 * state. Fortunately, the kernel_context is disposable and we do
 	 * not rely on its state.
 	 */
-	switch_to_kernel_context_sync(i915);
+	switch_to_kernel_context_sync(i915, i915->gt.active_engines);
 
 	mutex_unlock(&i915->drm.struct_mutex);
 	i915_reset_flush(i915);
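Note how each call site picks its mask: load_power_context() passes ALL_ENGINES because it deliberately wants every engine to load the kernel context, whereas the idle worker and i915_gem_suspend() pass i915->gt.active_engines so that only the engines actually used since the last idle are switched back. This diff only consumes i915->gt.active_engines; its bookkeeping is not part of this file.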