mirror of
https://git.kernel.org/pub/scm/linux/kernel/git/netdev/net.git/
synced 2026-04-18 06:33:43 -04:00
drm/amdgpu: Optimize VM invalidation engine allocation and synchronize GPU TLB flush
- Modify the VM invalidation engine allocation logic to handle SDMA page rings. SDMA page rings now share the VM invalidation engine with SDMA gfx rings instead of allocating a separate engine. This change ensures efficient resource management and avoids the issue of insufficient VM invalidation engines. - Add synchronization for GPU TLB flush operations in gmc_v9_0.c. Use spin_lock and spin_unlock to ensure thread safety and prevent race conditions during TLB flush operations. This improves the stability and reliability of the driver, especially in multi-threaded environments. v2: replace the sdma ring check with a function `amdgpu_sdma_is_page_queue` to check if a ring is an SDMA page queue. (Lijo) v3: Add GC version check, only enabled on GC9.4.3/9.4.4/9.5.0 v4: Fix code style and add more detailed description (Christian) v5: Remove dependency on vm_inv_eng loop order, explicitly look up the shared inv_eng (Christian/Lijo) v6: Added shared-ring search function amdgpu_sdma_get_shared_ring (Lijo) Suggested-by: Lijo Lazar <lijo.lazar@amd.com> Signed-off-by: Jesse Zhang <jesse.zhang@amd.com> Reviewed-by: Lijo Lazar <lijo.lazar@amd.com> Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
This commit is contained in:
committed by
Alex Deucher
parent
ea6dd40caf
commit
b09cdeb4d3
@@ -573,6 +573,7 @@ int amdgpu_gmc_allocate_vm_inv_eng(struct amdgpu_device *adev)
|
||||
unsigned vm_inv_engs[AMDGPU_MAX_VMHUBS] = {0};
|
||||
unsigned i;
|
||||
unsigned vmhub, inv_eng;
|
||||
struct amdgpu_ring *shared_ring;
|
||||
|
||||
/* init the vm inv eng for all vmhubs */
|
||||
for_each_set_bit(i, adev->vmhubs_mask, AMDGPU_MAX_VMHUBS) {
|
||||
@@ -595,6 +596,10 @@ int amdgpu_gmc_allocate_vm_inv_eng(struct amdgpu_device *adev)
|
||||
ring == &adev->cper.ring_buf)
|
||||
continue;
|
||||
|
||||
/* Skip if the ring is a shared ring */
|
||||
if (amdgpu_sdma_is_shared_inv_eng(adev, ring))
|
||||
continue;
|
||||
|
||||
inv_eng = ffs(vm_inv_engs[vmhub]);
|
||||
if (!inv_eng) {
|
||||
dev_err(adev->dev, "no VM inv eng for ring %s\n",
|
||||
@@ -607,6 +612,21 @@ int amdgpu_gmc_allocate_vm_inv_eng(struct amdgpu_device *adev)
|
||||
|
||||
dev_info(adev->dev, "ring %s uses VM inv eng %u on hub %u\n",
|
||||
ring->name, ring->vm_inv_eng, ring->vm_hub);
|
||||
/* SDMA has a special packet which allows it to use the same
|
||||
* invalidation engine for all the rings in one instance.
|
||||
* Therefore, we do not allocate a separate VM invalidation engine
|
||||
* for SDMA page rings. Instead, they share the VM invalidation
|
||||
* engine with the SDMA gfx ring. This change ensures efficient
|
||||
* resource management and avoids the issue of insufficient VM
|
||||
* invalidation engines.
|
||||
*/
|
||||
shared_ring = amdgpu_sdma_get_shared_ring(adev, ring);
|
||||
if (shared_ring) {
|
||||
shared_ring->vm_inv_eng = ring->vm_inv_eng;
|
||||
dev_info(adev->dev, "ring %s shares VM invalidation engine %u with ring %s on hub %u\n",
|
||||
ring->name, ring->vm_inv_eng, shared_ring->name, ring->vm_hub);
|
||||
continue;
|
||||
}
|
||||
}
|
||||
|
||||
return 0;
|
||||
|
||||
Reference in New Issue
Block a user