@@ -189,21 +189,35 @@ i915_gem_object_fence_prepare(struct drm_i915_gem_object *obj,
 			      int tiling_mode, unsigned int stride)
 {
 	struct i915_ggtt *ggtt = &to_i915(obj->base.dev)->ggtt;
-	struct i915_vma *vma;
+	struct i915_vma *vma, *vn;
+	LIST_HEAD(unbind);
 	int ret = 0;
 
 	if (tiling_mode == I915_TILING_NONE)
 		return 0;
 
 	mutex_lock(&ggtt->vm.mutex);
+
+	spin_lock(&obj->vma.lock);
 	for_each_ggtt_vma(vma, obj) {
+		GEM_BUG_ON(vma->vm != &ggtt->vm);
+
 		if (i915_vma_fence_prepare(vma, tiling_mode, stride))
 			continue;
 
+		list_move(&vma->vm_link, &unbind);
+	}
+	spin_unlock(&obj->vma.lock);
+
+	list_for_each_entry_safe(vma, vn, &unbind, vm_link) {
 		ret = __i915_vma_unbind(vma);
-		if (ret)
+		if (ret) {
+			/* Restore the remaining vma on an error */
+			list_splice(&unbind, &ggtt->vm.bound_list);
 			break;
+		}
 	}
+
 	mutex_unlock(&ggtt->vm.mutex);
 
 	return ret;
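
The hunk above follows a familiar kernel idiom: the vmas cannot be unbound while walking obj->vma.list under its spinlock (the unbind reshuffles the lists being walked and is presumably too heavyweight to run under a spinlock), so candidates are first moved onto a private unbind list while obj->vma.lock is held, then unbound with only the vm mutex held, and spliced back onto the vm's bound_list if anything fails. Below is a minimal, self-contained sketch of that collect-then-process pattern; the demo_owner, demo_item and process_one names are hypothetical stand-ins, not the i915 structures, and the sketch only illustrates the locking shape, not the driver's actual rules.

#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/spinlock.h>

struct demo_item {
	struct list_head link;
};

struct demo_owner {
	spinlock_t lock;	/* protects ->items; never held while sleeping */
	struct mutex mutex;	/* serialises the heavyweight work */
	struct list_head items;
};

/* Placeholder for work that may sleep and may fail (e.g. an unbind). */
static int process_one(struct demo_item *item)
{
	return 0;
}

static int demo_process_all(struct demo_owner *owner)
{
	struct demo_item *item, *next;
	LIST_HEAD(pending);
	int ret = 0;

	mutex_lock(&owner->mutex);

	/* Collect the candidates under the spinlock; no sleeping here. */
	spin_lock(&owner->lock);
	list_for_each_entry_safe(item, next, &owner->items, link)
		list_move(&item->link, &pending);
	spin_unlock(&owner->lock);

	/* Do the sleeping work with only the mutex held. */
	list_for_each_entry_safe(item, next, &pending, link) {
		ret = process_one(item);
		if (ret) {
			/* Restore the failed and remaining items on error. */
			spin_lock(&owner->lock);
			list_splice(&pending, &owner->items);
			spin_unlock(&owner->lock);
			break;
		}

		/* Success: this item is done and leaves the list for good. */
		list_del(&item->link);
	}

	mutex_unlock(&owner->mutex);
	return ret;
}

Note that the ordering mirrors the hunk: the mutex is taken before the spinlock, and the spinlock is never held across work that can sleep.
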
@@ -275,6 +289,7 @@ i915_gem_object_set_tiling(struct drm_i915_gem_object *obj,
 	}
 	mutex_unlock(&obj->mm.lock);
 
+	spin_lock(&obj->vma.lock);
 	for_each_ggtt_vma(vma, obj) {
 		vma->fence_size =
 			i915_gem_fence_size(i915, vma->size, tiling, stride);
@@ -285,6 +300,7 @@ i915_gem_object_set_tiling(struct drm_i915_gem_object *obj,
 		if (vma->fence)
 			vma->fence->dirty = true;
 	}
+	spin_unlock(&obj->vma.lock);
 
 	obj->tiling_and_stride = tiling | stride;
 	i915_gem_object_unlock(obj);
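
The last two hunks simply bracket the fence-parameter update loop in i915_gem_object_set_tiling() with obj->vma.lock: the walk over obj->vma.list now needs the same protection, but it only rewrites a few fields and never sleeps, so the spinlock on its own suffices. A minimal sketch of that shape, again with hypothetical demo_* names rather than the real structures:

#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/types.h>

struct demo_item {
	struct list_head link;
	unsigned int size;
	bool dirty;
};

struct demo_owner {
	spinlock_t lock;	/* protects ->items */
	struct list_head items;
};

static void demo_update_all(struct demo_owner *owner, unsigned int size)
{
	struct demo_item *item;

	/* Hold the list's spinlock for the whole walk; nothing here sleeps. */
	spin_lock(&owner->lock);
	list_for_each_entry(item, &owner->items, link) {
		item->size = size;
		item->dirty = true;
	}
	spin_unlock(&owner->lock);
}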