mirror of
https://git.kernel.org/pub/scm/linux/kernel/git/herbert/cryptodev-2.6.git
synced 2026-04-25 00:52:45 -04:00
drm/ttm: switch to per device LRU lock
Use a per-device LRU lock instead of a single global one, for potentially less lock contention. Signed-off-by: Christian König <christian.koenig@amd.com> Tested-by: Nirmoy Das <nirmoy.das@amd.com> Reviewed-by: Huang Rui <ray.huang@amd.com> Reviewed-by: Matthew Auld <matthew.auld@intel.com> Link: https://patchwork.freedesktop.org/patch/424010/
This commit is contained in:
@@ -91,7 +91,6 @@ int ttm_resource_manager_evict_all(struct ttm_device *bdev,
|
||||
.no_wait_gpu = false,
|
||||
.force_alloc = true
|
||||
};
|
||||
struct ttm_global *glob = &ttm_glob;
|
||||
struct dma_fence *fence;
|
||||
int ret;
|
||||
unsigned i;
|
||||
@@ -100,18 +99,18 @@ int ttm_resource_manager_evict_all(struct ttm_device *bdev,
|
||||
* Can't use standard list traversal since we're unlocking.
|
||||
*/
|
||||
|
||||
spin_lock(&glob->lru_lock);
|
||||
spin_lock(&bdev->lru_lock);
|
||||
for (i = 0; i < TTM_MAX_BO_PRIORITY; ++i) {
|
||||
while (!list_empty(&man->lru[i])) {
|
||||
spin_unlock(&glob->lru_lock);
|
||||
spin_unlock(&bdev->lru_lock);
|
||||
ret = ttm_mem_evict_first(bdev, man, NULL, &ctx,
|
||||
NULL);
|
||||
if (ret)
|
||||
return ret;
|
||||
spin_lock(&glob->lru_lock);
|
||||
spin_lock(&bdev->lru_lock);
|
||||
}
|
||||
}
|
||||
spin_unlock(&glob->lru_lock);
|
||||
spin_unlock(&bdev->lru_lock);
|
||||
|
||||
spin_lock(&man->move_lock);
|
||||
fence = dma_fence_get(man->move);
|
||||
|
||||
Reference in New Issue
Block a user