author     Chris Wilson <chris@chris-wilson.co.uk>  2019-01-28 10:23:53 +0000
committer  Chris Wilson <chris@chris-wilson.co.uk>  2019-01-28 16:24:13 +0000
commit     09d7e46b97c663c9b7f5245871a8f19114e9148d (patch)
tree       dc83634a393aba66c727d33782f43c36abc87c45 /drivers/gpu/drm/i915/i915_gem_evict.c
parent     499197dc169601116e106cabe409bf39295893b3 (diff)
drm/i915: Pull VM lists under the VM mutex.
A starting point to counter the pervasive struct_mutex. For the goal of avoiding global locks during user request submission (or at least blocking under them!), a simple but important step is being able to manage each client's GTT separately. For that, we want to stop using the struct_mutex as the guard for all things GTT/VM and switch instead to a specific mutex inside i915_address_space.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20190128102356.15037-2-chris@chris-wilson.co.uk
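For illustration, here is a condensed sketch of the shape i915_gem_evict_vm() takes with this patch applied (earlier setup in the real function, such as the GGTT flush, is omitted, so treat this as an approximation rather than the exact source): unpinned VMAs are collected from the VM's bound_list while holding the new per-VM mutex, and the heavier unbind work is done only after the mutex has been dropped.

/*
 * Sketch only: gather eviction candidates from vm->bound_list under
 * vm->mutex, then release the mutex before the unbind step, which may
 * sleep and take other locks.
 */
static int evict_vm_sketch(struct i915_address_space *vm)
{
	struct i915_vma *vma, *next;
	LIST_HEAD(eviction_list);
	int ret = 0;

	mutex_lock(&vm->mutex);
	list_for_each_entry(vma, &vm->bound_list, vm_link) {
		if (i915_vma_is_pinned(vma))
			continue;

		/* Pin so the vma stays valid once we drop vm->mutex. */
		__i915_vma_pin(vma);
		list_add(&vma->evict_link, &eviction_list);
	}
	mutex_unlock(&vm->mutex);

	list_for_each_entry_safe(vma, next, &eviction_list, evict_link) {
		__i915_vma_unpin(vma);
		if (ret == 0)
			ret = i915_vma_unbind(vma);
	}
	return ret;
}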
Diffstat (limited to 'drivers/gpu/drm/i915/i915_gem_evict.c')
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_evict.c  2
1 file changed, 2 insertions(+), 0 deletions(-)
diff --git a/drivers/gpu/drm/i915/i915_gem_evict.c b/drivers/gpu/drm/i915/i915_gem_evict.c
index d76839670632..68d74c50ac39 100644
--- a/drivers/gpu/drm/i915/i915_gem_evict.c
+++ b/drivers/gpu/drm/i915/i915_gem_evict.c
@@ -430,6 +430,7 @@ int i915_gem_evict_vm(struct i915_address_space *vm)
 	}
 
 	INIT_LIST_HEAD(&eviction_list);
+	mutex_lock(&vm->mutex);
 	list_for_each_entry(vma, &vm->bound_list, vm_link) {
 		if (i915_vma_is_pinned(vma))
 			continue;
@@ -437,6 +438,7 @@ int i915_gem_evict_vm(struct i915_address_space *vm)
 		__i915_vma_pin(vma);
 		list_add(&vma->evict_link, &eviction_list);
 	}
+	mutex_unlock(&vm->mutex);
 
 	ret = 0;
 	list_for_each_entry_safe(vma, next, &eviction_list, evict_link) {