drm/ttm: switch to per device LRU lock
Instead of having a global lock, use a per-device LRU lock for potentially less contention.

Signed-off-by: Christian König <christian.koenig@amd.com>
Tested-by: Nirmoy Das <nirmoy.das@amd.com>
Reviewed-by: Huang Rui <ray.huang@amd.com>
Reviewed-by: Matthew Auld <matthew.auld@intel.com>
Link: https://patchwork.freedesktop.org/patch/424010/
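With the explicit spin_lock()/spin_unlock() pairs gone from the callers below, the LRU move has to take the lock itself. A minimal sketch of how the ttm_bo_move_to_lru_tail_unlocked() helper could wrap that; the bo->bdev->lru_lock location is an assumption based on the commit title and is not shown in this excerpt:

#include <drm/ttm/ttm_bo_api.h>

/* Sketch only: takes the per-device LRU lock around the move so callers
 * such as ttm_eu_backoff_reservation() no longer need to lock themselves.
 * Assumes the new lock lives at bo->bdev->lru_lock. */
static inline void ttm_bo_move_to_lru_tail_unlocked(struct ttm_buffer_object *bo)
{
	spin_lock(&bo->bdev->lru_lock);
	ttm_bo_move_to_lru_tail(bo, &bo->mem, NULL);
	spin_unlock(&bo->bdev->lru_lock);
}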
@@ -51,14 +51,12 @@ void ttm_eu_backoff_reservation(struct ww_acquire_ctx *ticket,
 	if (list_empty(list))
 		return;
 
-	spin_lock(&ttm_glob.lru_lock);
 	list_for_each_entry(entry, list, head) {
 		struct ttm_buffer_object *bo = entry->bo;
 
-		ttm_bo_move_to_lru_tail(bo, &bo->mem, NULL);
+		ttm_bo_move_to_lru_tail_unlocked(bo);
 		dma_resv_unlock(bo->base.resv);
 	}
-	spin_unlock(&ttm_glob.lru_lock);
 
 	if (ticket)
 		ww_acquire_fini(ticket);
@@ -154,7 +152,6 @@ void ttm_eu_fence_buffer_objects(struct ww_acquire_ctx *ticket,
 	if (list_empty(list))
 		return;
 
-	spin_lock(&ttm_glob.lru_lock);
 	list_for_each_entry(entry, list, head) {
 		struct ttm_buffer_object *bo = entry->bo;
 
@@ -162,10 +159,9 @@ void ttm_eu_fence_buffer_objects(struct ww_acquire_ctx *ticket,
 			dma_resv_add_shared_fence(bo->base.resv, fence);
 		else
 			dma_resv_add_excl_fence(bo->base.resv, fence);
-		ttm_bo_move_to_lru_tail(bo, &bo->mem, NULL);
+		ttm_bo_move_to_lru_tail_unlocked(bo);
 		dma_resv_unlock(bo->base.resv);
 	}
-	spin_unlock(&ttm_glob.lru_lock);
 	if (ticket)
 		ww_acquire_fini(ticket);
 }
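Not shown in this excerpt is the other half of the change: the lock itself moving from the global ttm_glob into the device structure. A trimmed-down sketch of that shape (the real struct ttm_device carries many more members, and the init helper name here is hypothetical):

#include <linux/spinlock.h>

/* Sketch: a per-device LRU lock replacing the global ttm_glob.lru_lock;
 * field placement is an assumption based on the commit title. */
struct ttm_device {
	spinlock_t lru_lock;	/* protects this device's LRU lists */
	/* ... remaining device state elided ... */
};

/* Hypothetical init site; in practice this would happen during
 * device initialization. */
static void ttm_device_lru_lock_init(struct ttm_device *bdev)
{
	spin_lock_init(&bdev->lru_lock);
}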