Merge tag 'amd-drm-next-6.18-2025-09-19' of https://gitlab.freedesktop.org/agd5f/linux into drm-next

amd-drm-next-6.18-2025-09-19:

amdgpu:
- Fence driver cleanup fix
- DPC fixes
- Misc display fixes
- Support the MMIO remap page as a ttm pool
- JPEG parser updates
- UserQ updates
- VCN ctx handling fixes
- Documentation updates
- Misc cleanups
- SMU 13.0.x updates
- SI DPM updates
- GC 11.x cleaner shader updates
- DMCUB updates
- DML fixes
- Improve fallback handling for pixel encoding
- VCN reset improvements
- DCE6 DC updates
- DSC fixes
- Use devm for i2c buses
- GPUVM locking updates
- GPUVM documentation improvements
- Drop non-DC DCE11 code
- S0ix fixes
- Backlight fix
- SR-IOV fixes

amdkfd:
- SVM updates

Signed-off-by: Dave Airlie <airlied@redhat.com>

From: Alex Deucher <alexander.deucher@amd.com>
Link: https://lore.kernel.org/r/20250919193354.2989255-1-alexander.deucher@amd.com
Dave Airlie 2025-09-22 08:44:52 +10:00
commit 342f141ba9
205 changed files with 2100 additions and 5201 deletions


@ -13,5 +13,6 @@ Ryzen 7x20 series, Mendocino, 3.1.6, 10.3.7, 3.1.1, 5.2.7, 13.0.8, 13.0.8
Ryzen 7x40 series, Phoenix, 3.1.4, 11.0.1 / 11.0.4, 4.0.2, 6.0.1, 13.0.4 / 13.0.11, 13.0.4 / 13.0.11
Ryzen 8x40 series, Hawk Point, 3.1.4, 11.0.1 / 11.0.4, 4.0.2, 6.0.1, 13.0.4 / 13.0.11, 13.0.4 / 13.0.11
Ryzen AI 300 series, Strix Point, 3.5.0, 11.5.0, 4.0.5, 6.1.0, 14.0.0, 14.0.0
Ryzen AI 330 series, Krackan Point, 3.6.0, 11.5.3, 4.0.5, 6.1.3, 14.0.5, 14.0.5
Ryzen AI 350 series, Krackan Point, 3.5.0, 11.5.2, 4.0.5, 6.1.2, 14.0.4, 14.0.4
Ryzen AI Max 300 series, Strix Halo, 3.5.1, 11.5.1, 4.0.6, 6.1.1, 14.0.1, 14.0.1

(CSV columns: Product Name, Code Reference, DCN/DCE version, GC version, VCE/UVD/VCN version, SDMA version, MP0 version, MP1 version)


@ -210,4 +210,4 @@ IP Blocks
:doc: IP Blocks
.. kernel-doc:: drivers/gpu/drm/amd/include/amd_shared.h
:identifiers: amd_ip_block_type amd_ip_funcs DC_DEBUG_MASK
:identifiers: amd_ip_block_type amd_ip_funcs DC_FEATURE_MASK DC_DEBUG_MASK


@ -138,7 +138,6 @@ amdgpu-y += \
# add DCE block
amdgpu-y += \
dce_v10_0.o \
dce_v11_0.o \
amdgpu_vkms.o
# add GFX block


@ -63,6 +63,7 @@
#include "kgd_pp_interface.h"
#include "amd_shared.h"
#include "amdgpu_utils.h"
#include "amdgpu_mode.h"
#include "amdgpu_ih.h"
#include "amdgpu_irq.h"
@ -434,7 +435,6 @@ struct amdgpu_clock {
uint32_t default_mclk;
uint32_t default_sclk;
uint32_t default_dispclk;
uint32_t current_dispclk;
uint32_t dp_extclk;
uint32_t max_pixel_clock;
};
@ -545,7 +545,7 @@ struct amdgpu_wb {
* this value can be accessed directly by using the offset as an index.
* For the GPU address, it is necessary to use gpu_addr and the offset.
*/
volatile uint32_t *wb;
uint32_t *wb;
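/*
 * Illustrative example (not part of this patch): for a writeback slot at
 * dword offset offs obtained from amdgpu_device_wb_get(), the CPU view is
 * adev->wb.wb[offs] while the GPU view is adev->wb.gpu_addr + offs * 4.
 */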
/**
* @gpu_addr:
@ -721,7 +721,7 @@ int amdgpu_cs_wait_fences_ioctl(struct drm_device *dev, void *data,
/* VRAM scratch page for HDP bug, default vram page */
struct amdgpu_mem_scratch {
struct amdgpu_bo *robj;
volatile uint32_t *ptr;
uint32_t *ptr;
u64 gpu_addr;
};
@ -752,6 +752,7 @@ typedef void (*amdgpu_block_wreg_t)(struct amdgpu_device*, uint32_t, uint32_t, u
struct amdgpu_mmio_remap {
u32 reg_offset;
resource_size_t bus_addr;
struct amdgpu_bo *bo;
};
/* Define the HW IP blocks will be used in driver , add more if necessary */


@ -250,16 +250,24 @@ void amdgpu_amdkfd_interrupt(struct amdgpu_device *adev,
void amdgpu_amdkfd_suspend(struct amdgpu_device *adev, bool suspend_proc)
{
if (adev->kfd.dev)
kgd2kfd_suspend(adev->kfd.dev, suspend_proc);
if (adev->kfd.dev) {
if (adev->in_s0ix)
kgd2kfd_stop_sched_all_nodes(adev->kfd.dev);
else
kgd2kfd_suspend(adev->kfd.dev, suspend_proc);
}
}
int amdgpu_amdkfd_resume(struct amdgpu_device *adev, bool resume_proc)
{
int r = 0;
if (adev->kfd.dev)
r = kgd2kfd_resume(adev->kfd.dev, resume_proc);
if (adev->kfd.dev) {
if (adev->in_s0ix)
r = kgd2kfd_start_sched_all_nodes(adev->kfd.dev);
else
r = kgd2kfd_resume(adev->kfd.dev, resume_proc);
}
return r;
}


@ -428,7 +428,9 @@ void kgd2kfd_smi_event_throttle(struct kfd_dev *kfd, uint64_t throttle_bitmask);
int kgd2kfd_check_and_lock_kfd(struct kfd_dev *kfd);
void kgd2kfd_unlock_kfd(struct kfd_dev *kfd);
int kgd2kfd_start_sched(struct kfd_dev *kfd, uint32_t node_id);
int kgd2kfd_start_sched_all_nodes(struct kfd_dev *kfd);
int kgd2kfd_stop_sched(struct kfd_dev *kfd, uint32_t node_id);
int kgd2kfd_stop_sched_all_nodes(struct kfd_dev *kfd);
bool kgd2kfd_compute_active(struct kfd_dev *kfd, uint32_t node_id);
bool kgd2kfd_vmfault_fast_path(struct amdgpu_device *adev, struct amdgpu_iv_entry *entry,
bool retry_fault);
@ -518,11 +520,21 @@ static inline int kgd2kfd_start_sched(struct kfd_dev *kfd, uint32_t node_id)
return 0;
}
static inline int kgd2kfd_start_sched_all_nodes(struct kfd_dev *kfd)
{
return 0;
}
static inline int kgd2kfd_stop_sched(struct kfd_dev *kfd, uint32_t node_id)
{
return 0;
}
static inline int kgd2kfd_stop_sched_all_nodes(struct kfd_dev *kfd)
{
return 0;
}
static inline bool kgd2kfd_compute_active(struct kfd_dev *kfd, uint32_t node_id)
{
return false;


@ -706,7 +706,6 @@ int amdgpu_atombios_get_clock_info(struct amdgpu_device *adev)
}
adev->clock.dp_extclk =
le16_to_cpu(firmware_info->info_21.usUniphyDPModeExtClkFreq);
adev->clock.current_dispclk = adev->clock.default_dispclk;
adev->clock.max_pixel_clock = le16_to_cpu(firmware_info->info.usMaxPixelClock);
if (adev->clock.max_pixel_clock == 0)


@ -184,43 +184,36 @@ void amdgpu_bo_list_put(struct amdgpu_bo_list *list)
int amdgpu_bo_create_list_entry_array(struct drm_amdgpu_bo_list_in *in,
struct drm_amdgpu_bo_list_entry **info_param)
{
const void __user *uptr = u64_to_user_ptr(in->bo_info_ptr);
const uint32_t info_size = sizeof(struct drm_amdgpu_bo_list_entry);
const void __user *uptr = u64_to_user_ptr(in->bo_info_ptr);
const uint32_t bo_info_size = in->bo_info_size;
const uint32_t bo_number = in->bo_number;
struct drm_amdgpu_bo_list_entry *info;
int r;
info = kvmalloc_array(in->bo_number, info_size, GFP_KERNEL);
if (!info)
return -ENOMEM;
/* copy the handle array from userspace to a kernel buffer */
r = -EFAULT;
if (likely(info_size == in->bo_info_size)) {
unsigned long bytes = in->bo_number *
in->bo_info_size;
if (copy_from_user(info, uptr, bytes))
goto error_free;
if (likely(info_size == bo_info_size)) {
info = vmemdup_array_user(uptr, bo_number, info_size);
if (IS_ERR(info))
return PTR_ERR(info);
} else {
unsigned long bytes = min(in->bo_info_size, info_size);
const uint32_t bytes = min(bo_info_size, info_size);
unsigned i;
memset(info, 0, in->bo_number * info_size);
for (i = 0; i < in->bo_number; ++i) {
if (copy_from_user(&info[i], uptr, bytes))
goto error_free;
info = kvmalloc_array(bo_number, info_size, GFP_KERNEL);
if (!info)
return -ENOMEM;
uptr += in->bo_info_size;
memset(info, 0, bo_number * info_size);
for (i = 0; i < bo_number; ++i, uptr += bo_info_size) {
if (copy_from_user(&info[i], uptr, bytes)) {
kvfree(info);
return -EFAULT;
}
}
}
*info_param = info;
return 0;
error_free:
kvfree(info);
return r;
}
int amdgpu_bo_list_ioctl(struct drm_device *dev, void *data,


@ -178,25 +178,17 @@ static int amdgpu_cs_pass1(struct amdgpu_cs_parser *p,
struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
unsigned int num_ibs[AMDGPU_CS_GANG_SIZE] = { };
struct amdgpu_vm *vm = &fpriv->vm;
uint64_t *chunk_array_user;
uint64_t *chunk_array;
uint32_t uf_offset = 0;
size_t size;
int ret;
int i;
chunk_array = kvmalloc_array(cs->in.num_chunks, sizeof(uint64_t),
GFP_KERNEL);
if (!chunk_array)
return -ENOMEM;
/* get chunks */
chunk_array_user = u64_to_user_ptr(cs->in.chunks);
if (copy_from_user(chunk_array, chunk_array_user,
sizeof(uint64_t)*cs->in.num_chunks)) {
ret = -EFAULT;
goto free_chunk;
}
chunk_array = memdup_array_user(u64_to_user_ptr(cs->in.chunks),
cs->in.num_chunks,
sizeof(uint64_t));
if (IS_ERR(chunk_array))
return PTR_ERR(chunk_array);
p->nchunks = cs->in.num_chunks;
p->chunks = kvmalloc_array(p->nchunks, sizeof(struct amdgpu_cs_chunk),
@ -209,7 +201,6 @@ static int amdgpu_cs_pass1(struct amdgpu_cs_parser *p,
for (i = 0; i < p->nchunks; i++) {
struct drm_amdgpu_cs_chunk __user *chunk_ptr = NULL;
struct drm_amdgpu_cs_chunk user_chunk;
uint32_t __user *cdata;
chunk_ptr = u64_to_user_ptr(chunk_array[i]);
if (copy_from_user(&user_chunk, chunk_ptr,
@ -222,20 +213,16 @@ static int amdgpu_cs_pass1(struct amdgpu_cs_parser *p,
p->chunks[i].length_dw = user_chunk.length_dw;
size = p->chunks[i].length_dw;
cdata = u64_to_user_ptr(user_chunk.chunk_data);
p->chunks[i].kdata = kvmalloc_array(size, sizeof(uint32_t),
GFP_KERNEL);
if (p->chunks[i].kdata == NULL) {
ret = -ENOMEM;
p->chunks[i].kdata = vmemdup_array_user(u64_to_user_ptr(user_chunk.chunk_data),
size,
sizeof(uint32_t));
if (IS_ERR(p->chunks[i].kdata)) {
ret = PTR_ERR(p->chunks[i].kdata);
i--;
goto free_partial_kdata;
}
size *= sizeof(uint32_t);
if (copy_from_user(p->chunks[i].kdata, cdata, size)) {
ret = -EFAULT;
goto free_partial_kdata;
}
/* Assume the worst on the following checks */
ret = -EINVAL;
@ -286,7 +273,7 @@ static int amdgpu_cs_pass1(struct amdgpu_cs_parser *p,
}
}
if (!p->gang_size) {
if (!p->gang_size || (amdgpu_sriov_vf(p->adev) && p->gang_size > 1)) {
ret = -EINVAL;
goto free_all_kdata;
}
@ -1767,30 +1754,21 @@ int amdgpu_cs_wait_fences_ioctl(struct drm_device *dev, void *data,
{
struct amdgpu_device *adev = drm_to_adev(dev);
union drm_amdgpu_wait_fences *wait = data;
uint32_t fence_count = wait->in.fence_count;
struct drm_amdgpu_fence *fences_user;
struct drm_amdgpu_fence *fences;
int r;
/* Get the fences from userspace */
fences = kmalloc_array(fence_count, sizeof(struct drm_amdgpu_fence),
GFP_KERNEL);
if (fences == NULL)
return -ENOMEM;
fences_user = u64_to_user_ptr(wait->in.fences);
if (copy_from_user(fences, fences_user,
sizeof(struct drm_amdgpu_fence) * fence_count)) {
r = -EFAULT;
goto err_free_fences;
}
fences = memdup_array_user(u64_to_user_ptr(wait->in.fences),
wait->in.fence_count,
sizeof(struct drm_amdgpu_fence));
if (IS_ERR(fences))
return PTR_ERR(fences);
if (wait->in.wait_all)
r = amdgpu_cs_wait_all_fences(adev, filp, wait, fences);
else
r = amdgpu_cs_wait_any_fence(adev, filp, wait, fences);
err_free_fences:
kfree(fences);
return r;


@ -5072,6 +5072,10 @@ static int amdgpu_device_evict_resources(struct amdgpu_device *adev)
if (!adev->in_s4 && (adev->flags & AMD_IS_APU))
return 0;
/* No need to evict when going to S5 through S4 callbacks */
if (system_state == SYSTEM_POWER_OFF)
return 0;
ret = amdgpu_ttm_evict_resources(adev, TTM_PL_VRAM);
if (ret) {
dev_warn(adev->dev, "evicting device resources failed\n");
@ -5196,7 +5200,7 @@ int amdgpu_device_suspend(struct drm_device *dev, bool notify_clients)
adev->in_suspend = true;
if (amdgpu_sriov_vf(adev)) {
if (!adev->in_s0ix && !adev->in_runpm)
if (!adev->in_runpm)
amdgpu_amdkfd_suspend_process(adev);
amdgpu_virt_fini_data_exchange(adev);
r = amdgpu_virt_request_full_gpu(adev, false);
@ -5216,10 +5220,8 @@ int amdgpu_device_suspend(struct drm_device *dev, bool notify_clients)
amdgpu_device_ip_suspend_phase1(adev);
if (!adev->in_s0ix) {
amdgpu_amdkfd_suspend(adev, !amdgpu_sriov_vf(adev) && !adev->in_runpm);
amdgpu_userq_suspend(adev);
}
amdgpu_amdkfd_suspend(adev, !amdgpu_sriov_vf(adev) && !adev->in_runpm);
amdgpu_userq_suspend(adev);
r = amdgpu_device_evict_resources(adev);
if (r)
@ -5314,15 +5316,13 @@ int amdgpu_device_resume(struct drm_device *dev, bool notify_clients)
goto exit;
}
if (!adev->in_s0ix) {
r = amdgpu_amdkfd_resume(adev, !amdgpu_sriov_vf(adev) && !adev->in_runpm);
if (r)
goto exit;
r = amdgpu_amdkfd_resume(adev, !amdgpu_sriov_vf(adev) && !adev->in_runpm);
if (r)
goto exit;
r = amdgpu_userq_resume(adev);
if (r)
goto exit;
}
r = amdgpu_userq_resume(adev);
if (r)
goto exit;
r = amdgpu_device_ip_late_init(adev);
if (r)
@ -5335,7 +5335,7 @@ exit:
amdgpu_virt_init_data_exchange(adev);
amdgpu_virt_release_full_gpu(adev, true);
if (!adev->in_s0ix && !r && !adev->in_runpm)
if (!r && !adev->in_runpm)
r = amdgpu_amdkfd_resume_process(adev);
}
@ -6937,7 +6937,8 @@ pci_ers_result_t amdgpu_pci_error_detected(struct pci_dev *pdev, pci_channel_sta
{
struct drm_device *dev = pci_get_drvdata(pdev);
struct amdgpu_device *adev = drm_to_adev(dev);
struct amdgpu_hive_info *hive = amdgpu_get_xgmi_hive(adev);
struct amdgpu_hive_info *hive __free(xgmi_put_hive) =
amdgpu_get_xgmi_hive(adev);
struct amdgpu_reset_context reset_context;
struct list_head device_list;
@ -6976,10 +6977,8 @@ pci_ers_result_t amdgpu_pci_error_detected(struct pci_dev *pdev, pci_channel_sta
amdgpu_device_recovery_get_reset_lock(adev, &device_list);
amdgpu_device_halt_activities(adev, NULL, &reset_context, &device_list,
hive, false);
if (hive) {
if (hive)
mutex_unlock(&hive->hive_lock);
amdgpu_put_xgmi_hive(hive);
}
return PCI_ERS_RESULT_NEED_RESET;
case pci_channel_io_perm_failure:
/* Permanent error, prepare for device removal */
@ -7161,7 +7160,7 @@ static void amdgpu_device_cache_switch_state(struct amdgpu_device *adev)
struct pci_dev *parent = pci_upstream_bridge(adev->pdev);
int r;
if (parent->vendor != PCI_VENDOR_ID_ATI)
if (!parent || parent->vendor != PCI_VENDOR_ID_ATI)
return;
/* If already saved, return */


@ -70,6 +70,7 @@ void amdgpu_show_fdinfo(struct drm_printer *p, struct drm_file *file)
[AMDGPU_PL_GWS] = "gws",
[AMDGPU_PL_OA] = "oa",
[AMDGPU_PL_DOORBELL] = "doorbell",
[AMDGPU_PL_MMIO_REMAP] = "mmioremap",
};
unsigned int hw_ip, i;


@ -458,6 +458,9 @@ int amdgpu_gem_create_ioctl(struct drm_device *dev, void *data,
/* always clear VRAM */
flags |= AMDGPU_GEM_CREATE_VRAM_CLEARED;
if (args->in.domains & AMDGPU_GEM_DOMAIN_MMIO_REMAP)
return -EINVAL;
/* create a gem object to contain this object in */
if (args->in.domains & (AMDGPU_GEM_DOMAIN_GDS |
AMDGPU_GEM_DOMAIN_GWS | AMDGPU_GEM_DOMAIN_OA)) {


@ -2280,7 +2280,7 @@ void amdgpu_gfx_profile_ring_end_use(struct amdgpu_ring *ring)
* Return:
* return the latest index.
*/
u32 amdgpu_gfx_csb_preamble_start(volatile u32 *buffer)
u32 amdgpu_gfx_csb_preamble_start(u32 *buffer)
{
u32 count = 0;
@ -2304,7 +2304,7 @@ u32 amdgpu_gfx_csb_preamble_start(volatile u32 *buffer)
* Return:
* return the latest index.
*/
u32 amdgpu_gfx_csb_data_parser(struct amdgpu_device *adev, volatile u32 *buffer, u32 count)
u32 amdgpu_gfx_csb_data_parser(struct amdgpu_device *adev, u32 *buffer, u32 count)
{
const struct cs_section_def *sect = NULL;
const struct cs_extent_def *ext = NULL;
@ -2331,7 +2331,7 @@ u32 amdgpu_gfx_csb_data_parser(struct amdgpu_device *adev, volatile u32 *buffer,
* @buffer: This is an output variable that gets the PACKET3 preamble end.
* @count: Index at which to set the preamble end.
*/
void amdgpu_gfx_csb_preamble_end(volatile u32 *buffer, u32 count)
void amdgpu_gfx_csb_preamble_end(u32 *buffer, u32 count)
{
buffer[count++] = cpu_to_le32(PACKET3(PACKET3_PREAMBLE_CNTL, 0));
buffer[count++] = cpu_to_le32(PACKET3_PREAMBLE_END_CLEAR_STATE);


@ -642,9 +642,9 @@ void amdgpu_gfx_enforce_isolation_ring_end_use(struct amdgpu_ring *ring);
void amdgpu_gfx_profile_idle_work_handler(struct work_struct *work);
void amdgpu_gfx_profile_ring_begin_use(struct amdgpu_ring *ring);
void amdgpu_gfx_profile_ring_end_use(struct amdgpu_ring *ring);
u32 amdgpu_gfx_csb_preamble_start(volatile u32 *buffer);
u32 amdgpu_gfx_csb_data_parser(struct amdgpu_device *adev, volatile u32 *buffer, u32 count);
void amdgpu_gfx_csb_preamble_end(volatile u32 *buffer, u32 count);
u32 amdgpu_gfx_csb_preamble_start(u32 *buffer);
u32 amdgpu_gfx_csb_data_parser(struct amdgpu_device *adev, u32 *buffer, u32 count);
void amdgpu_gfx_csb_preamble_end(u32 *buffer, u32 count);
void amdgpu_debugfs_gfx_sched_mask_init(struct amdgpu_device *adev);
void amdgpu_debugfs_compute_sched_mask_init(struct amdgpu_device *adev);


@ -184,7 +184,7 @@ struct amdgpu_i2c_chan *amdgpu_i2c_create(struct drm_device *dev,
snprintf(i2c->adapter.name, sizeof(i2c->adapter.name),
"AMDGPU i2c hw bus %s", name);
i2c->adapter.algo = &amdgpu_atombios_i2c_algo;
ret = i2c_add_adapter(&i2c->adapter);
ret = devm_i2c_add_adapter(dev->dev, &i2c->adapter);
if (ret)
goto out_free;
} else {
@ -215,15 +215,6 @@ out_free:
}
void amdgpu_i2c_destroy(struct amdgpu_i2c_chan *i2c)
{
if (!i2c)
return;
WARN_ON(i2c->has_aux);
i2c_del_adapter(&i2c->adapter);
kfree(i2c);
}
void amdgpu_i2c_init(struct amdgpu_device *adev)
{
if (!adev->is_atom_fw) {
@ -248,12 +239,9 @@ void amdgpu_i2c_fini(struct amdgpu_device *adev)
{
int i;
for (i = 0; i < AMDGPU_MAX_I2C_BUS; i++) {
if (adev->i2c_bus[i]) {
amdgpu_i2c_destroy(adev->i2c_bus[i]);
for (i = 0; i < AMDGPU_MAX_I2C_BUS; i++)
if (adev->i2c_bus[i])
adev->i2c_bus[i] = NULL;
}
}
}
/* looks up bus based on id */


@ -56,14 +56,14 @@ struct amdgpu_ih_ring {
bool use_bus_addr;
struct amdgpu_bo *ring_obj;
volatile uint32_t *ring;
uint32_t *ring;
uint64_t gpu_addr;
uint64_t wptr_addr;
volatile uint32_t *wptr_cpu;
uint32_t *wptr_cpu;
uint64_t rptr_addr;
volatile uint32_t *rptr_cpu;
uint32_t *rptr_cpu;
bool enabled;
unsigned rptr;


@ -540,3 +540,68 @@ void amdgpu_jpeg_print_ip_state(struct amdgpu_ip_block *ip_block, struct drm_pri
drm_printf(p, "\nInactive Instance:JPEG%d\n", i);
}
}
static inline bool amdgpu_jpeg_reg_valid(u32 reg)
{
if (reg < JPEG_REG_RANGE_START || reg > JPEG_REG_RANGE_END ||
(reg >= JPEG_ATOMIC_RANGE_START && reg <= JPEG_ATOMIC_RANGE_END))
return false;
else
return true;
}
/**
* amdgpu_jpeg_dec_parse_cs - command submission parser
*
* @parser: Command submission parser context
* @job: the job to parse
* @ib: the IB to parse
*
* Parse the command stream, return -EINVAL for invalid packet,
* 0 otherwise
*/
int amdgpu_jpeg_dec_parse_cs(struct amdgpu_cs_parser *parser,
struct amdgpu_job *job,
struct amdgpu_ib *ib)
{
u32 i, reg, res, cond, type;
struct amdgpu_device *adev = parser->adev;
for (i = 0; i < ib->length_dw ; i += 2) {
reg = CP_PACKETJ_GET_REG(ib->ptr[i]);
res = CP_PACKETJ_GET_RES(ib->ptr[i]);
cond = CP_PACKETJ_GET_COND(ib->ptr[i]);
type = CP_PACKETJ_GET_TYPE(ib->ptr[i]);
if (res) /* only support 0 at the moment */
return -EINVAL;
switch (type) {
case PACKETJ_TYPE0:
if (cond != PACKETJ_CONDITION_CHECK0 ||
!amdgpu_jpeg_reg_valid(reg)) {
dev_err(adev->dev, "Invalid packet [0x%08x]!\n", ib->ptr[i]);
return -EINVAL;
}
break;
case PACKETJ_TYPE3:
if (cond != PACKETJ_CONDITION_CHECK3 ||
!amdgpu_jpeg_reg_valid(reg)) {
dev_err(adev->dev, "Invalid packet [0x%08x]!\n", ib->ptr[i]);
return -EINVAL;
}
break;
case PACKETJ_TYPE6:
if (ib->ptr[i] == CP_PACKETJ_NOP)
continue;
dev_err(adev->dev, "Invalid packet [0x%08x]!\n", ib->ptr[i]);
return -EINVAL;
default:
dev_err(adev->dev, "Unknown packet type %d !\n", type);
return -EINVAL;
}
}
return 0;
}


@ -25,11 +25,18 @@
#define __AMDGPU_JPEG_H__
#include "amdgpu_ras.h"
#include "amdgpu_cs.h"
#define AMDGPU_MAX_JPEG_INSTANCES 4
#define AMDGPU_MAX_JPEG_RINGS 10
#define AMDGPU_MAX_JPEG_RINGS_4_0_3 8
#define JPEG_REG_RANGE_START 0x4000
#define JPEG_REG_RANGE_END 0x41c2
#define JPEG_ATOMIC_RANGE_START 0x4120
#define JPEG_ATOMIC_RANGE_END 0x412A
#define AMDGPU_JPEG_HARVEST_JPEG0 (1 << 0)
#define AMDGPU_JPEG_HARVEST_JPEG1 (1 << 1)
@ -170,5 +177,8 @@ int amdgpu_jpeg_reg_dump_init(struct amdgpu_device *adev,
const struct amdgpu_hwip_reg_entry *reg, u32 count);
void amdgpu_jpeg_dump_ip_state(struct amdgpu_ip_block *ip_block);
void amdgpu_jpeg_print_ip_state(struct amdgpu_ip_block *ip_block, struct drm_printer *p);
int amdgpu_jpeg_dec_parse_cs(struct amdgpu_cs_parser *parser,
struct amdgpu_job *job,
struct amdgpu_ib *ib);
#endif /*__AMDGPU_JPEG_H__*/


@ -939,6 +939,10 @@ out:
if (adev->gfx.config.ta_cntl2_truncate_coord_mode)
dev_info->ids_flags |= AMDGPU_IDS_FLAGS_CONFORMANT_TRUNC_COORD;
/* Gang submit is not supported under SRIOV currently */
if (!amdgpu_sriov_vf(adev))
dev_info->ids_flags |= AMDGPU_IDS_FLAGS_GANG_SUBMIT;
if (amdgpu_passthrough(adev))
dev_info->ids_flags |= (AMDGPU_IDS_FLAGS_MODE_PT <<
AMDGPU_IDS_FLAGS_MODE_SHIFT) &


@ -153,6 +153,14 @@ void amdgpu_bo_placement_from_domain(struct amdgpu_bo *abo, u32 domain)
c++;
}
if (domain & AMDGPU_GEM_DOMAIN_MMIO_REMAP) {
places[c].fpfn = 0;
places[c].lpfn = 0;
places[c].mem_type = AMDGPU_PL_MMIO_REMAP;
places[c].flags = 0;
c++;
}
if (domain & AMDGPU_GEM_DOMAIN_GTT) {
places[c].fpfn = 0;
places[c].lpfn = 0;
@ -1546,6 +1554,8 @@ uint32_t amdgpu_bo_mem_stats_placement(struct amdgpu_bo *bo)
return AMDGPU_PL_OA;
case AMDGPU_GEM_DOMAIN_DOORBELL:
return AMDGPU_PL_DOORBELL;
case AMDGPU_GEM_DOMAIN_MMIO_REMAP:
return AMDGPU_PL_MMIO_REMAP;
default:
return TTM_PL_SYSTEM;
}
@ -1629,6 +1639,9 @@ u64 amdgpu_bo_print_info(int id, struct amdgpu_bo *bo, struct seq_file *m)
case AMDGPU_PL_DOORBELL:
placement = "DOORBELL";
break;
case AMDGPU_PL_MMIO_REMAP:
placement = "MMIO REMAP";
break;
case TTM_PL_SYSTEM:
default:
placement = "CPU";


@ -167,6 +167,8 @@ static inline unsigned amdgpu_mem_type_to_domain(u32 mem_type)
return AMDGPU_GEM_DOMAIN_OA;
case AMDGPU_PL_DOORBELL:
return AMDGPU_GEM_DOMAIN_DOORBELL;
case AMDGPU_PL_MMIO_REMAP:
return AMDGPU_GEM_DOMAIN_MMIO_REMAP;
default:
break;
}


@ -506,7 +506,8 @@ static int psp_sw_init(struct amdgpu_ip_block *ip_block)
}
ret = amdgpu_bo_create_kernel(adev, PSP_1_MEG, PSP_1_MEG,
AMDGPU_GEM_DOMAIN_VRAM,
(amdgpu_sriov_vf(adev) || adev->debug_use_vram_fw_buf) ?
AMDGPU_GEM_DOMAIN_VRAM : AMDGPU_GEM_DOMAIN_GTT,
&psp->fw_pri_bo,
&psp->fw_pri_mc_addr,
&psp->fw_pri_buf);


@ -171,13 +171,9 @@ static ssize_t ta_if_load_debugfs_write(struct file *fp, const char *buf, size_t
copy_pos += sizeof(uint32_t);
ta_bin = kzalloc(ta_bin_len, GFP_KERNEL);
if (!ta_bin)
return -ENOMEM;
if (copy_from_user((void *)ta_bin, &buf[copy_pos], ta_bin_len)) {
ret = -EFAULT;
goto err_free_bin;
}
ta_bin = memdup_user(&buf[copy_pos], ta_bin_len);
if (IS_ERR(ta_bin))
return PTR_ERR(ta_bin);
/* Set TA context and functions */
set_ta_context_funcs(psp, ta_type, &context);
@ -327,13 +323,9 @@ static ssize_t ta_if_invoke_debugfs_write(struct file *fp, const char *buf, size
return -EFAULT;
copy_pos += sizeof(uint32_t);
shared_buf = kzalloc(shared_buf_len, GFP_KERNEL);
if (!shared_buf)
return -ENOMEM;
if (copy_from_user((void *)shared_buf, &buf[copy_pos], shared_buf_len)) {
ret = -EFAULT;
goto err_free_shared_buf;
}
shared_buf = memdup_user(&buf[copy_pos], shared_buf_len);
if (IS_ERR(shared_buf))
return PTR_ERR(shared_buf);
set_ta_context_funcs(psp, ta_type, &context);


@ -219,10 +219,17 @@ static int amdgpu_check_address_validity(struct amdgpu_device *adev,
struct amdgpu_vram_block_info blk_info;
uint64_t page_pfns[32] = {0};
int i, ret, count;
bool hit = false;
if (amdgpu_ip_version(adev, UMC_HWIP, 0) < IP_VERSION(12, 0, 0))
return 0;
if (amdgpu_sriov_vf(adev)) {
if (amdgpu_virt_check_vf_critical_region(adev, address, &hit))
return -EPERM;
return hit ? -EACCES : 0;
}
if ((address >= adev->gmc.mc_vram_size) ||
(address >= RAS_UMC_INJECT_ADDR_LIMIT))
return -EFAULT;
@ -2702,6 +2709,7 @@ static void amdgpu_ras_do_recovery(struct work_struct *work)
struct amdgpu_device *adev = ras->adev;
struct list_head device_list, *device_list_handle = NULL;
struct amdgpu_hive_info *hive = amdgpu_get_xgmi_hive(adev);
unsigned int error_query_mode;
enum ras_event_type type;
if (hive) {
@ -2730,6 +2738,13 @@ static void amdgpu_ras_do_recovery(struct work_struct *work)
device_list_handle = &device_list;
}
if (amdgpu_ras_get_error_query_mode(adev, &error_query_mode)) {
if (error_query_mode == AMDGPU_RAS_FIRMWARE_ERROR_QUERY) {
/* wait 500 ms to ensure PMFW has finished polling MCA bank info */
msleep(500);
}
}
type = amdgpu_ras_get_fatal_error_event(adev);
list_for_each_entry(remote_adev,
device_list_handle, gmc.xgmi.head) {


@ -91,6 +91,7 @@ static inline void amdgpu_res_first(struct ttm_resource *res,
break;
case TTM_PL_TT:
case AMDGPU_PL_DOORBELL:
case AMDGPU_PL_MMIO_REMAP:
node = to_ttm_range_mgr_node(res)->mm_nodes;
while (start >= node->size << PAGE_SHIFT)
start -= node++->size << PAGE_SHIFT;
@ -153,6 +154,7 @@ static inline void amdgpu_res_next(struct amdgpu_res_cursor *cur, uint64_t size)
break;
case TTM_PL_TT:
case AMDGPU_PL_DOORBELL:
case AMDGPU_PL_MMIO_REMAP:
node = cur->node;
cur->node = ++node;


@ -364,7 +364,8 @@ int amdgpu_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring,
/* Allocate ring buffer */
if (ring->ring_obj == NULL) {
r = amdgpu_bo_create_kernel(adev, ring->ring_size + ring->funcs->extra_dw, PAGE_SIZE,
r = amdgpu_bo_create_kernel(adev, ring->ring_size + ring->funcs->extra_bytes,
PAGE_SIZE,
AMDGPU_GEM_DOMAIN_GTT,
&ring->ring_obj,
&ring->gpu_addr,


@ -114,7 +114,7 @@ struct amdgpu_sched {
*/
struct amdgpu_fence_driver {
uint64_t gpu_addr;
volatile uint32_t *cpu_addr;
uint32_t *cpu_addr;
/* sync_seq is protected by ring emission lock */
uint32_t sync_seq;
atomic_t last_seq;
@ -211,7 +211,18 @@ struct amdgpu_ring_funcs {
bool support_64bit_ptrs;
bool no_user_fence;
bool secure_submission_supported;
unsigned extra_dw;
/**
* @extra_bytes:
*
* Optional extra space in bytes that is added to the ring size
* when allocating the BO that holds the contents of the ring.
* This space isn't used for command submission to the ring,
* but is just there to satisfy some hardware requirements or
* implement workarounds. It's up to the implementation of each
* specific ring to initialize this space.
*/
unsigned extra_bytes;
/* ring read/write ptr handling */
u64 (*get_rptr)(struct amdgpu_ring *ring);
@ -298,7 +309,7 @@ struct amdgpu_ring {
unsigned int ring_backup_entries_to_copy;
unsigned rptr_offs;
u64 rptr_gpu_addr;
volatile u32 *rptr_cpu_addr;
u32 *rptr_cpu_addr;
/**
* @wptr:
@ -378,19 +389,19 @@ struct amdgpu_ring {
* This is the CPU address pointer in the writeback slot. This is used
* to commit changes to the GPU.
*/
volatile u32 *wptr_cpu_addr;
u32 *wptr_cpu_addr;
unsigned fence_offs;
u64 fence_gpu_addr;
volatile u32 *fence_cpu_addr;
u32 *fence_cpu_addr;
uint64_t current_ctx;
char name[16];
u32 trail_seq;
unsigned trail_fence_offs;
u64 trail_fence_gpu_addr;
volatile u32 *trail_fence_cpu_addr;
u32 *trail_fence_cpu_addr;
unsigned cond_exe_offs;
u64 cond_exe_gpu_addr;
volatile u32 *cond_exe_cpu_addr;
u32 *cond_exe_cpu_addr;
unsigned int set_q_mode_offs;
u32 *set_q_mode_ptr;
u64 set_q_mode_token;
@ -470,10 +481,7 @@ static inline void amdgpu_ring_set_preempt_cond_exec(struct amdgpu_ring *ring,
static inline void amdgpu_ring_clear_ring(struct amdgpu_ring *ring)
{
int i = 0;
while (i <= ring->buf_mask)
ring->ring[i++] = ring->funcs->nop;
memset32(ring->ring, ring->funcs->nop, ring->buf_mask + 1);
}
static inline void amdgpu_ring_write(struct amdgpu_ring *ring, uint32_t v)


@ -89,7 +89,7 @@ void amdgpu_gfx_rlc_exit_safe_mode(struct amdgpu_device *adev, int xcc_id)
int amdgpu_gfx_rlc_init_sr(struct amdgpu_device *adev, u32 dws)
{
const u32 *src_ptr;
volatile u32 *dst_ptr;
u32 *dst_ptr;
u32 i;
int r;
@ -189,7 +189,7 @@ int amdgpu_gfx_rlc_init_cpt(struct amdgpu_device *adev)
void amdgpu_gfx_rlc_setup_cp_table(struct amdgpu_device *adev)
{
const __le32 *fw_data;
volatile u32 *dst_ptr;
u32 *dst_ptr;
int me, i, max_me;
u32 bo_offset = 0;
u32 table_offset, table_size;


@ -251,7 +251,7 @@ struct amdgpu_rlc_funcs {
* and it also provides a pointer to it which is used by the firmware
* to load the clear state in some cases.
*/
void (*get_csb_buffer)(struct amdgpu_device *adev, volatile u32 *buffer);
void (*get_csb_buffer)(struct amdgpu_device *adev, u32 *buffer);
int (*get_cp_table_num)(struct amdgpu_device *adev);
int (*resume)(struct amdgpu_device *adev);
void (*stop)(struct amdgpu_device *adev);
@ -275,19 +275,19 @@ struct amdgpu_rlc {
/* for power gating */
struct amdgpu_bo *save_restore_obj;
uint64_t save_restore_gpu_addr;
volatile uint32_t *sr_ptr;
uint32_t *sr_ptr;
const u32 *reg_list;
u32 reg_list_size;
/* for clear state */
struct amdgpu_bo *clear_state_obj;
uint64_t clear_state_gpu_addr;
volatile uint32_t *cs_ptr;
uint32_t *cs_ptr;
const struct cs_section_def *cs_data;
u32 clear_state_size;
/* for cp tables */
struct amdgpu_bo *cp_table_obj;
uint64_t cp_table_gpu_addr;
volatile uint32_t *cp_table_ptr;
uint32_t *cp_table_ptr;
u32 cp_table_size;
/* safe mode for updating CG/PG state */


@ -123,6 +123,7 @@ static void amdgpu_evict_flags(struct ttm_buffer_object *bo,
case AMDGPU_PL_GWS:
case AMDGPU_PL_OA:
case AMDGPU_PL_DOORBELL:
case AMDGPU_PL_MMIO_REMAP:
placement->num_placement = 0;
return;
@ -448,7 +449,8 @@ bool amdgpu_res_cpu_visible(struct amdgpu_device *adev,
return false;
if (res->mem_type == TTM_PL_SYSTEM || res->mem_type == TTM_PL_TT ||
res->mem_type == AMDGPU_PL_PREEMPT || res->mem_type == AMDGPU_PL_DOORBELL)
res->mem_type == AMDGPU_PL_PREEMPT || res->mem_type == AMDGPU_PL_DOORBELL ||
res->mem_type == AMDGPU_PL_MMIO_REMAP)
return true;
if (res->mem_type != TTM_PL_VRAM)
@ -539,10 +541,12 @@ static int amdgpu_bo_move(struct ttm_buffer_object *bo, bool evict,
old_mem->mem_type == AMDGPU_PL_GWS ||
old_mem->mem_type == AMDGPU_PL_OA ||
old_mem->mem_type == AMDGPU_PL_DOORBELL ||
old_mem->mem_type == AMDGPU_PL_MMIO_REMAP ||
new_mem->mem_type == AMDGPU_PL_GDS ||
new_mem->mem_type == AMDGPU_PL_GWS ||
new_mem->mem_type == AMDGPU_PL_OA ||
new_mem->mem_type == AMDGPU_PL_DOORBELL) {
new_mem->mem_type == AMDGPU_PL_DOORBELL ||
new_mem->mem_type == AMDGPU_PL_MMIO_REMAP) {
/* Nothing to save here */
amdgpu_bo_move_notify(bo, evict, new_mem);
ttm_bo_move_null(bo, new_mem);
@ -630,6 +634,12 @@ static int amdgpu_ttm_io_mem_reserve(struct ttm_device *bdev,
mem->bus.is_iomem = true;
mem->bus.caching = ttm_uncached;
break;
case AMDGPU_PL_MMIO_REMAP:
mem->bus.offset = mem->start << PAGE_SHIFT;
mem->bus.offset += adev->rmmio_remap.bus_addr;
mem->bus.is_iomem = true;
mem->bus.caching = ttm_uncached;
break;
default:
return -EINVAL;
}
@ -647,6 +657,8 @@ static unsigned long amdgpu_ttm_io_mem_pfn(struct ttm_buffer_object *bo,
if (bo->resource->mem_type == AMDGPU_PL_DOORBELL)
return ((uint64_t)(adev->doorbell.base + cursor.start)) >> PAGE_SHIFT;
else if (bo->resource->mem_type == AMDGPU_PL_MMIO_REMAP)
return ((uint64_t)(adev->rmmio_remap.bus_addr + cursor.start)) >> PAGE_SHIFT;
return (adev->gmc.aper_base + cursor.start) >> PAGE_SHIFT;
}
@ -1356,7 +1368,8 @@ uint64_t amdgpu_ttm_tt_pde_flags(struct ttm_tt *ttm, struct ttm_resource *mem)
if (mem && (mem->mem_type == TTM_PL_TT ||
mem->mem_type == AMDGPU_PL_DOORBELL ||
mem->mem_type == AMDGPU_PL_PREEMPT)) {
mem->mem_type == AMDGPU_PL_PREEMPT ||
mem->mem_type == AMDGPU_PL_MMIO_REMAP)) {
flags |= AMDGPU_PTE_SYSTEM;
if (ttm->caching == ttm_cached)
@ -1843,6 +1856,59 @@ static void amdgpu_ttm_pools_fini(struct amdgpu_device *adev)
adev->mman.ttm_pools = NULL;
}
/**
* amdgpu_ttm_mmio_remap_bo_init - Allocate the singleton 4K MMIO_REMAP BO
* @adev: amdgpu device
*
* Allocates a one-page (4K) GEM BO in AMDGPU_GEM_DOMAIN_MMIO_REMAP when the
* hardware exposes a remap base (adev->rmmio_remap.bus_addr) and the host
* PAGE_SIZE is <= AMDGPU_GPU_PAGE_SIZE (4K). The BO is created as a regular
* GEM object (amdgpu_bo_create).
*
* Return:
* * 0 on success or intentional skip (feature not present/unsupported)
* * negative errno on allocation failure
*/
static int amdgpu_ttm_mmio_remap_bo_init(struct amdgpu_device *adev)
{
struct amdgpu_bo_param bp;
int r;
/* Skip if HW doesn't expose remap, or if PAGE_SIZE > AMDGPU_GPU_PAGE_SIZE (4K). */
if (!adev->rmmio_remap.bus_addr || PAGE_SIZE > AMDGPU_GPU_PAGE_SIZE)
return 0;
memset(&bp, 0, sizeof(bp));
/* Create exactly one GEM BO in the MMIO_REMAP domain. */
bp.type = ttm_bo_type_device; /* userspace-mappable GEM */
bp.size = AMDGPU_GPU_PAGE_SIZE; /* 4K */
bp.byte_align = AMDGPU_GPU_PAGE_SIZE;
bp.domain = AMDGPU_GEM_DOMAIN_MMIO_REMAP;
bp.flags = 0;
bp.resv = NULL;
bp.bo_ptr_size = sizeof(struct amdgpu_bo);
r = amdgpu_bo_create(adev, &bp, &adev->rmmio_remap.bo);
if (r)
return r;
return 0;
}
/**
* amdgpu_ttm_mmio_remap_bo_fini - Free the singleton MMIO_REMAP BO
* @adev: amdgpu device
*
* Frees the kernel-owned MMIO_REMAP BO if it was allocated by
* amdgpu_ttm_mmio_remap_bo_init().
*/
static void amdgpu_ttm_mmio_remap_bo_fini(struct amdgpu_device *adev)
{
amdgpu_bo_unref(&adev->rmmio_remap.bo);
adev->rmmio_remap.bo = NULL;
}
/*
* amdgpu_ttm_init - Init the memory management (ttm) as well as various
* gtt/vram related fields.
@ -1879,11 +1945,13 @@ int amdgpu_ttm_init(struct amdgpu_device *adev)
}
adev->mman.initialized = true;
/* Initialize VRAM pool with all of VRAM divided into pages */
r = amdgpu_vram_mgr_init(adev);
if (r) {
dev_err(adev->dev, "Failed initializing VRAM heap.\n");
return r;
if (!adev->gmc.is_app_apu) {
/* Initialize VRAM pool with all of VRAM divided into pages */
r = amdgpu_vram_mgr_init(adev);
if (r) {
dev_err(adev->dev, "Failed initializing VRAM heap.\n");
return r;
}
}
/* Change the size here instead of the init above so only lpfn is affected */
@ -2010,6 +2078,18 @@ int amdgpu_ttm_init(struct amdgpu_device *adev)
return r;
}
/* Initialize MMIO-remap pool (single page 4K) */
r = amdgpu_ttm_init_on_chip(adev, AMDGPU_PL_MMIO_REMAP, 1);
if (r) {
dev_err(adev->dev, "Failed initializing MMIO-remap heap.\n");
return r;
}
/* Allocate the singleton MMIO_REMAP BO (4K) if supported */
r = amdgpu_ttm_mmio_remap_bo_init(adev);
if (r)
return r;
/* Initialize preemptible memory pool */
r = amdgpu_preempt_mgr_init(adev);
if (r) {
@ -2072,6 +2152,8 @@ void amdgpu_ttm_fini(struct amdgpu_device *adev)
}
amdgpu_bo_free_kernel(&adev->mman.sdma_access_bo, NULL,
&adev->mman.sdma_access_ptr);
amdgpu_ttm_mmio_remap_bo_fini(adev);
amdgpu_ttm_fw_reserve_vram_fini(adev);
amdgpu_ttm_drv_reserve_vram_fini(adev);
@ -2084,7 +2166,8 @@ void amdgpu_ttm_fini(struct amdgpu_device *adev)
drm_dev_exit(idx);
}
amdgpu_vram_mgr_fini(adev);
if (!adev->gmc.is_app_apu)
amdgpu_vram_mgr_fini(adev);
amdgpu_gtt_mgr_fini(adev);
amdgpu_preempt_mgr_fini(adev);
amdgpu_doorbell_fini(adev);
@ -2093,6 +2176,7 @@ void amdgpu_ttm_fini(struct amdgpu_device *adev)
ttm_range_man_fini(&adev->mman.bdev, AMDGPU_PL_GWS);
ttm_range_man_fini(&adev->mman.bdev, AMDGPU_PL_OA);
ttm_range_man_fini(&adev->mman.bdev, AMDGPU_PL_DOORBELL);
ttm_range_man_fini(&adev->mman.bdev, AMDGPU_PL_MMIO_REMAP);
ttm_device_fini(&adev->mman.bdev);
adev->mman.initialized = false;
dev_info(adev->dev, "amdgpu: ttm finalized\n");


@ -34,7 +34,8 @@
#define AMDGPU_PL_OA (TTM_PL_PRIV + 2)
#define AMDGPU_PL_PREEMPT (TTM_PL_PRIV + 3)
#define AMDGPU_PL_DOORBELL (TTM_PL_PRIV + 4)
#define __AMDGPU_PL_NUM (TTM_PL_PRIV + 5)
#define AMDGPU_PL_MMIO_REMAP (TTM_PL_PRIV + 5)
#define __AMDGPU_PL_NUM (TTM_PL_PRIV + 6)
#define AMDGPU_GTT_MAX_TRANSFER_SIZE 512
#define AMDGPU_GTT_NUM_TRANSFER_WINDOWS 2


@ -44,6 +44,80 @@ u32 amdgpu_userq_get_supported_ip_mask(struct amdgpu_device *adev)
return userq_ip_mask;
}
int amdgpu_userq_input_va_validate(struct amdgpu_vm *vm, u64 addr,
u64 expected_size)
{
struct amdgpu_bo_va_mapping *va_map;
u64 user_addr;
u64 size;
int r = 0;
user_addr = (addr & AMDGPU_GMC_HOLE_MASK) >> AMDGPU_GPU_PAGE_SHIFT;
size = expected_size >> AMDGPU_GPU_PAGE_SHIFT;
r = amdgpu_bo_reserve(vm->root.bo, false);
if (r)
return r;
va_map = amdgpu_vm_bo_lookup_mapping(vm, user_addr);
if (!va_map) {
r = -EINVAL;
goto out_err;
}
/* Only validate whether the userq resides within the VM mapping range */
if (user_addr >= va_map->start &&
va_map->last - user_addr + 1 >= size) {
amdgpu_bo_unreserve(vm->root.bo);
return 0;
}
out_err:
amdgpu_bo_unreserve(vm->root.bo);
return r;
}
static int
amdgpu_userq_preempt_helper(struct amdgpu_userq_mgr *uq_mgr,
struct amdgpu_usermode_queue *queue)
{
struct amdgpu_device *adev = uq_mgr->adev;
const struct amdgpu_userq_funcs *userq_funcs =
adev->userq_funcs[queue->queue_type];
int r = 0;
if (queue->state == AMDGPU_USERQ_STATE_MAPPED) {
r = userq_funcs->preempt(uq_mgr, queue);
if (r) {
queue->state = AMDGPU_USERQ_STATE_HUNG;
} else {
queue->state = AMDGPU_USERQ_STATE_PREEMPTED;
}
}
return r;
}
static int
amdgpu_userq_restore_helper(struct amdgpu_userq_mgr *uq_mgr,
struct amdgpu_usermode_queue *queue)
{
struct amdgpu_device *adev = uq_mgr->adev;
const struct amdgpu_userq_funcs *userq_funcs =
adev->userq_funcs[queue->queue_type];
int r = 0;
if (queue->state == AMDGPU_USERQ_STATE_PREEMPTED) {
r = userq_funcs->restore(uq_mgr, queue);
if (r) {
queue->state = AMDGPU_USERQ_STATE_HUNG;
} else {
queue->state = AMDGPU_USERQ_STATE_MAPPED;
}
}
return r;
}
static int
amdgpu_userq_unmap_helper(struct amdgpu_userq_mgr *uq_mgr,
struct amdgpu_usermode_queue *queue)
@ -53,7 +127,8 @@ amdgpu_userq_unmap_helper(struct amdgpu_userq_mgr *uq_mgr,
adev->userq_funcs[queue->queue_type];
int r = 0;
if (queue->state == AMDGPU_USERQ_STATE_MAPPED) {
if ((queue->state == AMDGPU_USERQ_STATE_MAPPED) ||
(queue->state == AMDGPU_USERQ_STATE_PREEMPTED)) {
r = userq_funcs->unmap(uq_mgr, queue);
if (r)
queue->state = AMDGPU_USERQ_STATE_HUNG;
@ -112,22 +187,6 @@ amdgpu_userq_cleanup(struct amdgpu_userq_mgr *uq_mgr,
kfree(queue);
}
int
amdgpu_userq_active(struct amdgpu_userq_mgr *uq_mgr)
{
struct amdgpu_usermode_queue *queue;
int queue_id;
int ret = 0;
mutex_lock(&uq_mgr->userq_mutex);
/* Resume all the queues for this process */
idr_for_each_entry(&uq_mgr->userq_idr, queue, queue_id)
ret += queue->state == AMDGPU_USERQ_STATE_MAPPED;
mutex_unlock(&uq_mgr->userq_mutex);
return ret;
}
static struct amdgpu_usermode_queue *
amdgpu_userq_find(struct amdgpu_userq_mgr *uq_mgr, int qid)
{
@ -323,6 +382,11 @@ amdgpu_userq_destroy(struct drm_file *filp, int queue_id)
debugfs_remove_recursive(queue->debugfs_queue);
#endif
r = amdgpu_userq_unmap_helper(uq_mgr, queue);
/* TODO: a reset is required on userq HW unmap errors */
if (unlikely(r != AMDGPU_USERQ_STATE_UNMAPPED)) {
drm_warn(adev_to_drm(uq_mgr->adev), "trying to destroy a HW mapping userq\n");
queue->state = AMDGPU_USERQ_STATE_HUNG;
}
amdgpu_userq_cleanup(uq_mgr, queue, queue_id);
mutex_unlock(&uq_mgr->userq_mutex);
@ -404,27 +468,10 @@ amdgpu_userq_create(struct drm_file *filp, union drm_amdgpu_userq *args)
(args->in.flags & AMDGPU_USERQ_CREATE_FLAGS_QUEUE_PRIORITY_MASK) >>
AMDGPU_USERQ_CREATE_FLAGS_QUEUE_PRIORITY_SHIFT;
/* Usermode queues are only supported for GFX IP as of now */
if (args->in.ip_type != AMDGPU_HW_IP_GFX &&
args->in.ip_type != AMDGPU_HW_IP_DMA &&
args->in.ip_type != AMDGPU_HW_IP_COMPUTE) {
drm_file_err(uq_mgr->file, "Usermode queue doesn't support IP type %u\n",
args->in.ip_type);
return -EINVAL;
}
r = amdgpu_userq_priority_permit(filp, priority);
if (r)
return r;
if ((args->in.flags & AMDGPU_USERQ_CREATE_FLAGS_QUEUE_SECURE) &&
(args->in.ip_type != AMDGPU_HW_IP_GFX) &&
(args->in.ip_type != AMDGPU_HW_IP_COMPUTE) &&
!amdgpu_is_tmz(adev)) {
drm_file_err(uq_mgr->file, "Secure only supported on GFX/Compute queues\n");
return -EINVAL;
}
r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
if (r < 0) {
drm_file_err(uq_mgr->file, "pm_runtime_get_sync() failed for userqueue create\n");
@ -456,6 +503,14 @@ amdgpu_userq_create(struct drm_file *filp, union drm_amdgpu_userq *args)
r = -ENOMEM;
goto unlock;
}
/* Validate the userq virtual addresses. */
if (amdgpu_userq_input_va_validate(&fpriv->vm, args->in.queue_va, args->in.queue_size) ||
amdgpu_userq_input_va_validate(&fpriv->vm, args->in.rptr_va, AMDGPU_GPU_PAGE_SIZE) ||
amdgpu_userq_input_va_validate(&fpriv->vm, args->in.wptr_va, AMDGPU_GPU_PAGE_SIZE)) {
kfree(queue);
goto unlock;
}
queue->doorbell_handle = args->in.doorbell_handle;
queue->queue_type = args->in.ip_type;
queue->vm = &fpriv->vm;
@ -543,22 +598,45 @@ unlock:
return r;
}
int amdgpu_userq_ioctl(struct drm_device *dev, void *data,
struct drm_file *filp)
static int amdgpu_userq_input_args_validate(struct drm_device *dev,
union drm_amdgpu_userq *args,
struct drm_file *filp)
{
union drm_amdgpu_userq *args = data;
int r;
struct amdgpu_device *adev = drm_to_adev(dev);
switch (args->in.op) {
case AMDGPU_USERQ_OP_CREATE:
if (args->in.flags & ~(AMDGPU_USERQ_CREATE_FLAGS_QUEUE_PRIORITY_MASK |
AMDGPU_USERQ_CREATE_FLAGS_QUEUE_SECURE))
return -EINVAL;
r = amdgpu_userq_create(filp, args);
if (r)
drm_file_err(filp, "Failed to create usermode queue\n");
break;
/* Usermode queues are only supported for GFX, SDMA and compute IPs for now */
if (args->in.ip_type != AMDGPU_HW_IP_GFX &&
args->in.ip_type != AMDGPU_HW_IP_DMA &&
args->in.ip_type != AMDGPU_HW_IP_COMPUTE) {
drm_file_err(filp, "Usermode queue doesn't support IP type %u\n",
args->in.ip_type);
return -EINVAL;
}
if ((args->in.flags & AMDGPU_USERQ_CREATE_FLAGS_QUEUE_SECURE) &&
(args->in.ip_type != AMDGPU_HW_IP_GFX) &&
(args->in.ip_type != AMDGPU_HW_IP_COMPUTE) &&
!amdgpu_is_tmz(adev)) {
drm_file_err(filp, "Secure only supported on GFX/Compute queues\n");
return -EINVAL;
}
if (args->in.queue_va == AMDGPU_BO_INVALID_OFFSET ||
args->in.queue_va == 0 ||
args->in.queue_size == 0) {
drm_file_err(filp, "invalidate userq queue va or size\n");
return -EINVAL;
}
if (!args->in.wptr_va || !args->in.rptr_va) {
drm_file_err(filp, "invalidate userq queue rptr or wptr\n");
return -EINVAL;
}
break;
case AMDGPU_USERQ_OP_FREE:
if (args->in.ip_type ||
args->in.doorbell_handle ||
@ -571,6 +649,31 @@ int amdgpu_userq_ioctl(struct drm_device *dev, void *data,
args->in.mqd ||
args->in.mqd_size)
return -EINVAL;
break;
default:
return -EINVAL;
}
return 0;
}
int amdgpu_userq_ioctl(struct drm_device *dev, void *data,
struct drm_file *filp)
{
union drm_amdgpu_userq *args = data;
int r;
if (amdgpu_userq_input_args_validate(dev, args, filp) < 0)
return -EINVAL;
switch (args->in.op) {
case AMDGPU_USERQ_OP_CREATE:
r = amdgpu_userq_create(filp, args);
if (r)
drm_file_err(filp, "Failed to create usermode queue\n");
break;
case AMDGPU_USERQ_OP_FREE:
r = amdgpu_userq_destroy(filp, args->in.queue_id);
if (r)
drm_file_err(filp, "Failed to destroy usermode queue\n");
@ -593,7 +696,7 @@ amdgpu_userq_restore_all(struct amdgpu_userq_mgr *uq_mgr)
/* Resume all the queues for this process */
idr_for_each_entry(&uq_mgr->userq_idr, queue, queue_id) {
r = amdgpu_userq_map_helper(uq_mgr, queue);
r = amdgpu_userq_restore_helper(uq_mgr, queue);
if (r)
ret = r;
}
@ -603,108 +706,106 @@ amdgpu_userq_restore_all(struct amdgpu_userq_mgr *uq_mgr)
return ret;
}
static int
amdgpu_userq_validate_vm_bo(void *_unused, struct amdgpu_bo *bo)
static int amdgpu_userq_validate_vm(void *param, struct amdgpu_bo *bo)
{
struct ttm_operation_ctx ctx = { false, false };
int ret;
amdgpu_bo_placement_from_domain(bo, bo->allowed_domains);
ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
if (ret)
DRM_ERROR("Fail to validate\n");
return ret;
return ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
}
/* Handle all BOs on the invalidated list, validate them and update the PTs */
static int
amdgpu_userq_validate_bos(struct amdgpu_userq_mgr *uq_mgr)
amdgpu_userq_bo_validate(struct amdgpu_device *adev, struct drm_exec *exec,
struct amdgpu_vm *vm)
{
struct ttm_operation_ctx ctx = { false, false };
struct amdgpu_bo_va *bo_va;
struct amdgpu_bo *bo;
int ret;
spin_lock(&vm->invalidated_lock);
while (!list_empty(&vm->invalidated)) {
bo_va = list_first_entry(&vm->invalidated,
struct amdgpu_bo_va,
base.vm_status);
spin_unlock(&vm->invalidated_lock);
bo = bo_va->base.bo;
ret = drm_exec_prepare_obj(exec, &bo->tbo.base, 2);
if (unlikely(ret))
return ret;
amdgpu_bo_placement_from_domain(bo, bo->allowed_domains);
ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
if (ret)
return ret;
/* This moves the bo_va to the done list */
ret = amdgpu_vm_bo_update(adev, bo_va, false);
if (ret)
return ret;
spin_lock(&vm->invalidated_lock);
}
spin_unlock(&vm->invalidated_lock);
return 0;
}
/* Make sure the whole VM is ready to be used */
static int
amdgpu_userq_vm_validate(struct amdgpu_userq_mgr *uq_mgr)
{
struct amdgpu_fpriv *fpriv = uq_mgr_to_fpriv(uq_mgr);
struct amdgpu_vm *vm = &fpriv->vm;
struct amdgpu_device *adev = uq_mgr->adev;
struct amdgpu_vm *vm = &fpriv->vm;
struct amdgpu_bo_va *bo_va;
struct ww_acquire_ctx *ticket;
struct drm_exec exec;
struct amdgpu_bo *bo;
struct dma_resv *resv;
bool clear, unlock;
int ret = 0;
int ret;
drm_exec_init(&exec, DRM_EXEC_IGNORE_DUPLICATES, 0);
drm_exec_until_all_locked(&exec) {
ret = amdgpu_vm_lock_pd(vm, &exec, 2);
ret = amdgpu_vm_lock_pd(vm, &exec, 1);
drm_exec_retry_on_contention(&exec);
if (unlikely(ret)) {
drm_file_err(uq_mgr->file, "Failed to lock PD\n");
if (unlikely(ret))
goto unlock_all;
}
/* Lock the done list */
list_for_each_entry(bo_va, &vm->done, base.vm_status) {
bo = bo_va->base.bo;
if (!bo)
continue;
ret = amdgpu_vm_lock_done_list(vm, &exec, 1);
drm_exec_retry_on_contention(&exec);
if (unlikely(ret))
goto unlock_all;
ret = drm_exec_lock_obj(&exec, &bo->tbo.base);
drm_exec_retry_on_contention(&exec);
if (unlikely(ret))
goto unlock_all;
}
/* This validates PDs, PTs and per VM BOs */
ret = amdgpu_vm_validate(adev, vm, NULL,
amdgpu_userq_validate_vm,
NULL);
if (unlikely(ret))
goto unlock_all;
/* This locks and validates the remaining evicted BOs */
ret = amdgpu_userq_bo_validate(adev, &exec, vm);
drm_exec_retry_on_contention(&exec);
if (unlikely(ret))
goto unlock_all;
}
spin_lock(&vm->status_lock);
while (!list_empty(&vm->moved)) {
bo_va = list_first_entry(&vm->moved, struct amdgpu_bo_va,
base.vm_status);
spin_unlock(&vm->status_lock);
ret = amdgpu_vm_handle_moved(adev, vm, NULL);
if (ret)
goto unlock_all;
/* Per VM BOs never need to be cleared in the page tables */
ret = amdgpu_vm_bo_update(adev, bo_va, false);
if (ret)
goto unlock_all;
spin_lock(&vm->status_lock);
}
ret = amdgpu_vm_update_pdes(adev, vm, false);
if (ret)
goto unlock_all;
ticket = &exec.ticket;
while (!list_empty(&vm->invalidated)) {
bo_va = list_first_entry(&vm->invalidated, struct amdgpu_bo_va,
base.vm_status);
resv = bo_va->base.bo->tbo.base.resv;
spin_unlock(&vm->status_lock);
bo = bo_va->base.bo;
ret = amdgpu_userq_validate_vm_bo(NULL, bo);
if (ret) {
drm_file_err(uq_mgr->file, "Failed to validate BO\n");
goto unlock_all;
}
/* Try to reserve the BO to avoid clearing its ptes */
if (!adev->debug_vm && dma_resv_trylock(resv)) {
clear = false;
unlock = true;
/* The caller is already holding the reservation lock */
} else if (dma_resv_locking_ctx(resv) == ticket) {
clear = false;
unlock = false;
/* Somebody else is using the BO right now */
} else {
clear = true;
unlock = false;
}
ret = amdgpu_vm_bo_update(adev, bo_va, clear);
if (unlock)
dma_resv_unlock(resv);
if (ret)
goto unlock_all;
spin_lock(&vm->status_lock);
}
spin_unlock(&vm->status_lock);
/*
* We need to wait for all VM updates to finish before restarting the
* queues. Using the done list like that is now ok since everything is
* locked in place.
*/
list_for_each_entry(bo_va, &vm->done, base.vm_status)
dma_fence_wait(bo_va->last_pt_update, false);
dma_fence_wait(vm->last_update, false);
ret = amdgpu_eviction_fence_replace_fence(&fpriv->evf_mgr, &exec);
if (ret)
@ -725,7 +826,7 @@ static void amdgpu_userq_restore_worker(struct work_struct *work)
mutex_lock(&uq_mgr->userq_mutex);
ret = amdgpu_userq_validate_bos(uq_mgr);
ret = amdgpu_userq_vm_validate(uq_mgr);
if (ret) {
drm_file_err(uq_mgr->file, "Failed to validate BOs to restore\n");
goto unlock;
@ -750,7 +851,7 @@ amdgpu_userq_evict_all(struct amdgpu_userq_mgr *uq_mgr)
/* Try to unmap all the queues in this process ctx */
idr_for_each_entry(&uq_mgr->userq_idr, queue, queue_id) {
r = amdgpu_userq_unmap_helper(uq_mgr, queue);
r = amdgpu_userq_preempt_helper(uq_mgr, queue);
if (r)
ret = r;
}
@ -876,7 +977,10 @@ int amdgpu_userq_suspend(struct amdgpu_device *adev)
cancel_delayed_work_sync(&uqm->resume_work);
mutex_lock(&uqm->userq_mutex);
idr_for_each_entry(&uqm->userq_idr, queue, queue_id) {
r = amdgpu_userq_unmap_helper(uqm, queue);
if (adev->in_s0ix)
r = amdgpu_userq_preempt_helper(uqm, queue);
else
r = amdgpu_userq_unmap_helper(uqm, queue);
if (r)
ret = r;
}
@ -901,7 +1005,10 @@ int amdgpu_userq_resume(struct amdgpu_device *adev)
list_for_each_entry_safe(uqm, tmp, &adev->userq_mgr_list, list) {
mutex_lock(&uqm->userq_mutex);
idr_for_each_entry(&uqm->userq_idr, queue, queue_id) {
r = amdgpu_userq_map_helper(uqm, queue);
if (adev->in_s0ix)
r = amdgpu_userq_restore_helper(uqm, queue);
else
r = amdgpu_userq_map_helper(uqm, queue);
if (r)
ret = r;
}
@ -935,7 +1042,7 @@ int amdgpu_userq_stop_sched_for_enforce_isolation(struct amdgpu_device *adev,
if (((queue->queue_type == AMDGPU_HW_IP_GFX) ||
(queue->queue_type == AMDGPU_HW_IP_COMPUTE)) &&
(queue->xcp_id == idx)) {
r = amdgpu_userq_unmap_helper(uqm, queue);
r = amdgpu_userq_preempt_helper(uqm, queue);
if (r)
ret = r;
}
@ -969,7 +1076,7 @@ int amdgpu_userq_start_sched_for_enforce_isolation(struct amdgpu_device *adev,
if (((queue->queue_type == AMDGPU_HW_IP_GFX) ||
(queue->queue_type == AMDGPU_HW_IP_COMPUTE)) &&
(queue->xcp_id == idx)) {
r = amdgpu_userq_map_helper(uqm, queue);
r = amdgpu_userq_restore_helper(uqm, queue);
if (r)
ret = r;
}


@ -120,8 +120,6 @@ void amdgpu_userq_destroy_object(struct amdgpu_userq_mgr *uq_mgr,
void amdgpu_userq_evict(struct amdgpu_userq_mgr *uq_mgr,
struct amdgpu_eviction_fence *ev_fence);
int amdgpu_userq_active(struct amdgpu_userq_mgr *uq_mgr);
void amdgpu_userq_ensure_ev_fence(struct amdgpu_userq_mgr *userq_mgr,
struct amdgpu_eviction_fence_mgr *evf_mgr);
@ -139,4 +137,6 @@ int amdgpu_userq_stop_sched_for_enforce_isolation(struct amdgpu_device *adev,
int amdgpu_userq_start_sched_for_enforce_isolation(struct amdgpu_device *adev,
u32 idx);
int amdgpu_userq_input_va_validate(struct amdgpu_vm *vm, u64 addr,
u64 expected_size);
#endif


@ -0,0 +1,91 @@
/* SPDX-License-Identifier: MIT */
/*
* Copyright 2025 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
*/
#ifndef AMDGPU_UTILS_H_
#define AMDGPU_UTILS_H_
/* ---------- Generic 2-bit capability attribute encoding ----------
* 00 INVALID, 01 RO, 10 WO, 11 RW
*/
enum amdgpu_cap_attr {
AMDGPU_CAP_ATTR_INVALID = 0,
AMDGPU_CAP_ATTR_RO = 1 << 0,
AMDGPU_CAP_ATTR_WO = 1 << 1,
AMDGPU_CAP_ATTR_RW = (AMDGPU_CAP_ATTR_RO | AMDGPU_CAP_ATTR_WO),
};
#define AMDGPU_CAP_ATTR_BITS 2
#define AMDGPU_CAP_ATTR_MAX ((1U << AMDGPU_CAP_ATTR_BITS) - 1)
/* Internal helper to build helpers for a given enum NAME */
#define DECLARE_ATTR_CAP_CLASS_HELPERS(NAME) \
enum { NAME##_BITMAP_BITS = NAME##_COUNT * AMDGPU_CAP_ATTR_BITS }; \
struct NAME##_caps { \
DECLARE_BITMAP(bmap, NAME##_BITMAP_BITS); \
}; \
static inline unsigned int NAME##_ATTR_START(enum NAME##_cap_id cap) \
{ return (unsigned int)cap * AMDGPU_CAP_ATTR_BITS; } \
static inline void NAME##_attr_init(struct NAME##_caps *c) \
{ if (c) bitmap_zero(c->bmap, NAME##_BITMAP_BITS); } \
static inline int NAME##_attr_set(struct NAME##_caps *c, \
enum NAME##_cap_id cap, enum amdgpu_cap_attr attr) \
{ \
if (!c) \
return -EINVAL; \
if (cap >= NAME##_COUNT) \
return -EINVAL; \
if ((unsigned int)attr > AMDGPU_CAP_ATTR_MAX) \
return -EINVAL; \
bitmap_write(c->bmap, (unsigned long)attr, \
NAME##_ATTR_START(cap), AMDGPU_CAP_ATTR_BITS); \
return 0; \
} \
static inline int NAME##_attr_get(const struct NAME##_caps *c, \
enum NAME##_cap_id cap, enum amdgpu_cap_attr *out) \
{ \
unsigned long v; \
if (!c || !out) \
return -EINVAL; \
if (cap >= NAME##_COUNT) \
return -EINVAL; \
v = bitmap_read(c->bmap, NAME##_ATTR_START(cap), AMDGPU_CAP_ATTR_BITS); \
*out = (enum amdgpu_cap_attr)v; \
return 0; \
} \
static inline bool NAME##_cap_is_ro(const struct NAME##_caps *c, enum NAME##_cap_id id) \
{ enum amdgpu_cap_attr a; return !NAME##_attr_get(c, id, &a) && a == AMDGPU_CAP_ATTR_RO; } \
static inline bool NAME##_cap_is_wo(const struct NAME##_caps *c, enum NAME##_cap_id id) \
{ enum amdgpu_cap_attr a; return !NAME##_attr_get(c, id, &a) && a == AMDGPU_CAP_ATTR_WO; } \
static inline bool NAME##_cap_is_rw(const struct NAME##_caps *c, enum NAME##_cap_id id) \
{ enum amdgpu_cap_attr a; return !NAME##_attr_get(c, id, &a) && a == AMDGPU_CAP_ATTR_RW; }
/* Element expander for enum creation */
#define _CAP_ENUM_ELEM(x) x,
/* Public macro: declare enum + helpers from an Xmacro list */
#define DECLARE_ATTR_CAP_CLASS(NAME, LIST_MACRO) \
enum NAME##_cap_id { LIST_MACRO(_CAP_ENUM_ELEM) NAME##_COUNT }; \
DECLARE_ATTR_CAP_CLASS_HELPERS(NAME)
#endif /* AMDGPU_UTILS_H_ */
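For reference, a minimal usage sketch of the capability-class helpers declared above. The list name, capability IDs and demo function below are hypothetical; the only real user added in this series is AMDGPU_VIRT_CAPS_LIST / DECLARE_ATTR_CAP_CLASS(amdgpu_virt, ...) in amdgpu_virt.h further down.

/* Hypothetical example, not part of this series. */
#define EXAMPLE_CAPS_LIST(X) \
	X(EXAMPLE_CAP_FOO) \
	X(EXAMPLE_CAP_BAR)

/* Expands to enum example_cap_id { EXAMPLE_CAP_FOO, EXAMPLE_CAP_BAR, example_COUNT },
 * struct example_caps, and the example_attr_*() / example_cap_is_*() helpers.
 */
DECLARE_ATTR_CAP_CLASS(example, EXAMPLE_CAPS_LIST);

static bool example_caps_demo(void)
{
	struct example_caps caps;

	example_attr_init(&caps);	/* all capabilities start as INVALID (00) */
	example_attr_set(&caps, EXAMPLE_CAP_FOO, AMDGPU_CAP_ATTR_RW);

	/* true: FOO was set to RW; BAR was never set and stays INVALID */
	return example_cap_is_rw(&caps, EXAMPLE_CAP_FOO) &&
	       !example_cap_is_rw(&caps, EXAMPLE_CAP_BAR);
}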


@ -257,12 +257,12 @@ int amdgpu_vcn_sw_init(struct amdgpu_device *adev, int i)
return 0;
}
int amdgpu_vcn_sw_fini(struct amdgpu_device *adev, int i)
void amdgpu_vcn_sw_fini(struct amdgpu_device *adev, int i)
{
int j;
if (adev->vcn.harvest_config & (1 << i))
return 0;
return;
amdgpu_bo_free_kernel(
&adev->vcn.inst[i].dpg_sram_bo,
@ -292,8 +292,6 @@ int amdgpu_vcn_sw_fini(struct amdgpu_device *adev, int i)
mutex_destroy(&adev->vcn.inst[i].vcn_pg_lock);
mutex_destroy(&adev->vcn.inst[i].vcn1_jpeg1_workaround);
return 0;
}
bool amdgpu_vcn_is_disabled_vcn(struct amdgpu_device *adev, enum vcn_ring_type type, uint32_t vcn_instance)
@ -1159,7 +1157,7 @@ static ssize_t amdgpu_debugfs_vcn_fwlog_read(struct file *f, char __user *buf,
{
struct amdgpu_vcn_inst *vcn;
void *log_buf;
volatile struct amdgpu_vcn_fwlog *plog;
struct amdgpu_vcn_fwlog *plog;
unsigned int read_pos, write_pos, available, i, read_bytes = 0;
unsigned int read_num[2] = {0};
@ -1172,7 +1170,7 @@ static ssize_t amdgpu_debugfs_vcn_fwlog_read(struct file *f, char __user *buf,
log_buf = vcn->fw_shared.cpu_addr + vcn->fw_shared.mem_size;
plog = (volatile struct amdgpu_vcn_fwlog *)log_buf;
plog = (struct amdgpu_vcn_fwlog *)log_buf;
read_pos = plog->rptr;
write_pos = plog->wptr;
@ -1239,11 +1237,11 @@ void amdgpu_debugfs_vcn_fwlog_init(struct amdgpu_device *adev, uint8_t i,
void amdgpu_vcn_fwlog_init(struct amdgpu_vcn_inst *vcn)
{
#if defined(CONFIG_DEBUG_FS)
volatile uint32_t *flag = vcn->fw_shared.cpu_addr;
uint32_t *flag = vcn->fw_shared.cpu_addr;
void *fw_log_cpu_addr = vcn->fw_shared.cpu_addr + vcn->fw_shared.mem_size;
uint64_t fw_log_gpu_addr = vcn->fw_shared.gpu_addr + vcn->fw_shared.mem_size;
volatile struct amdgpu_vcn_fwlog *log_buf = fw_log_cpu_addr;
volatile struct amdgpu_fw_shared_fw_logging *fw_log = vcn->fw_shared.cpu_addr
struct amdgpu_vcn_fwlog *log_buf = fw_log_cpu_addr;
struct amdgpu_fw_shared_fw_logging *fw_log = vcn->fw_shared.cpu_addr
+ vcn->fw_shared.log_offset;
*flag |= cpu_to_le32(AMDGPU_VCN_FW_LOGGING_FLAG);
fw_log->is_enabled = 1;


@ -516,7 +516,7 @@ enum vcn_ring_type {
int amdgpu_vcn_early_init(struct amdgpu_device *adev, int i);
int amdgpu_vcn_sw_init(struct amdgpu_device *adev, int i);
int amdgpu_vcn_sw_fini(struct amdgpu_device *adev, int i);
void amdgpu_vcn_sw_fini(struct amdgpu_device *adev, int i);
int amdgpu_vcn_suspend(struct amdgpu_device *adev, int i);
int amdgpu_vcn_resume(struct amdgpu_device *adev, int i);
void amdgpu_vcn_ring_begin_use(struct amdgpu_ring *ring);

View File

@ -828,11 +828,14 @@ static void amdgpu_virt_init_ras(struct amdgpu_device *adev)
{
ratelimit_state_init(&adev->virt.ras.ras_error_cnt_rs, 5 * HZ, 1);
ratelimit_state_init(&adev->virt.ras.ras_cper_dump_rs, 5 * HZ, 1);
ratelimit_state_init(&adev->virt.ras.ras_chk_criti_rs, 5 * HZ, 1);
ratelimit_set_flags(&adev->virt.ras.ras_error_cnt_rs,
RATELIMIT_MSG_ON_RELEASE);
ratelimit_set_flags(&adev->virt.ras.ras_cper_dump_rs,
RATELIMIT_MSG_ON_RELEASE);
ratelimit_set_flags(&adev->virt.ras.ras_chk_criti_rs,
RATELIMIT_MSG_ON_RELEASE);
mutex_init(&adev->virt.ras.ras_telemetry_mutex);
@ -1501,3 +1504,55 @@ void amdgpu_virt_request_bad_pages(struct amdgpu_device *adev)
if (virt->ops && virt->ops->req_bad_pages)
virt->ops->req_bad_pages(adev);
}
static int amdgpu_virt_cache_chk_criti_hit(struct amdgpu_device *adev,
struct amdsriov_ras_telemetry *host_telemetry,
bool *hit)
{
struct amd_sriov_ras_chk_criti *tmp = NULL;
uint32_t checksum, used_size;
checksum = host_telemetry->header.checksum;
used_size = host_telemetry->header.used_size;
if (used_size > (AMD_SRIOV_RAS_TELEMETRY_SIZE_KB << 10))
return 0;
tmp = kmemdup(&host_telemetry->body.chk_criti, used_size, GFP_KERNEL);
if (!tmp)
return -ENOMEM;
if (checksum != amd_sriov_msg_checksum(tmp, used_size, 0, 0))
goto out;
if (hit)
*hit = tmp->hit ? true : false;
out:
kfree(tmp);
return 0;
}
int amdgpu_virt_check_vf_critical_region(struct amdgpu_device *adev, u64 addr, bool *hit)
{
struct amdgpu_virt *virt = &adev->virt;
int r = -EPERM;
if (!virt->ops || !virt->ops->req_ras_chk_criti)
return -EOPNOTSUPP;
/* Host allows 15 RAS telemetry requests per 60 seconds. After that, the Host
 * will ignore incoming guest messages. Ratelimit the guest messages to
 * prevent guest self-DoS.
*/
if (__ratelimit(&virt->ras.ras_chk_criti_rs)) {
mutex_lock(&virt->ras.ras_telemetry_mutex);
if (!virt->ops->req_ras_chk_criti(adev, addr))
r = amdgpu_virt_cache_chk_criti_hit(
adev, virt->fw_reserve.ras_telemetry, hit);
mutex_unlock(&virt->ras.ras_telemetry_mutex);
}
return r;
}
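A hypothetical caller of the new helper might look like the sketch below. This is illustrative only (example_check_bad_page and the surrounding flow are not part of this series), but the return-code handling follows the logic of amdgpu_virt_check_vf_critical_region() above.

static int example_check_bad_page(struct amdgpu_device *adev, u64 page_addr)
{
	bool hit = false;
	int r;

	r = amdgpu_virt_check_vf_critical_region(adev, page_addr, &hit);
	if (r == -EOPNOTSUPP)
		return 0; /* host interface not available on this config */
	if (r)
		return r; /* rate limited (-EPERM), -ENOMEM, or host request failed */

	if (hit)
		dev_warn(adev->dev,
			 "page 0x%llx falls inside a VF critical region\n",
			 page_addr);
	return 0;
}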

View File

@ -98,6 +98,7 @@ struct amdgpu_virt_ops {
int (*req_ras_err_count)(struct amdgpu_device *adev);
int (*req_ras_cper_dump)(struct amdgpu_device *adev, u64 vf_rptr);
int (*req_bad_pages)(struct amdgpu_device *adev);
int (*req_ras_chk_criti)(struct amdgpu_device *adev, u64 addr);
};
/*
@ -252,10 +253,15 @@ struct amdgpu_virt_ras_err_handler_data {
struct amdgpu_virt_ras {
struct ratelimit_state ras_error_cnt_rs;
struct ratelimit_state ras_cper_dump_rs;
struct ratelimit_state ras_chk_criti_rs;
struct mutex ras_telemetry_mutex;
uint64_t cper_rptr;
};
#define AMDGPU_VIRT_CAPS_LIST(X) X(AMDGPU_VIRT_CAP_POWER_LIMIT)
DECLARE_ATTR_CAP_CLASS(amdgpu_virt, AMDGPU_VIRT_CAPS_LIST);
/* GPU virtualization */
struct amdgpu_virt {
uint32_t caps;
@ -274,6 +280,7 @@ struct amdgpu_virt {
const struct amdgpu_virt_ops *ops;
struct amdgpu_vf_error_buffer vf_errors;
struct amdgpu_virt_fw_reserve fw_reserve;
struct amdgpu_virt_caps virt_caps;
uint32_t gim_feature;
uint32_t reg_access_mode;
int req_init_data_ver;
@ -448,4 +455,5 @@ int amdgpu_virt_ras_telemetry_post_reset(struct amdgpu_device *adev);
bool amdgpu_virt_ras_telemetry_block_en(struct amdgpu_device *adev,
enum amdgpu_ras_block block);
void amdgpu_virt_request_bad_pages(struct amdgpu_device *adev);
int amdgpu_virt_check_vf_critical_region(struct amdgpu_device *adev, u64 addr, bool *hit);
#endif

View File

@ -14,7 +14,6 @@
#include "dce_v8_0.h"
#endif
#include "dce_v10_0.h"
#include "dce_v11_0.h"
#include "ivsrcid/ivsrcid_vislands30.h"
#include "amdgpu_vkms.h"
#include "amdgpu_display.h"
@ -581,13 +580,6 @@ static int amdgpu_vkms_hw_init(struct amdgpu_ip_block *ip_block)
case CHIP_TONGA:
dce_v10_0_disable_dce(adev);
break;
case CHIP_CARRIZO:
case CHIP_STONEY:
case CHIP_POLARIS10:
case CHIP_POLARIS11:
case CHIP_VEGAM:
dce_v11_0_disable_dce(adev);
break;
case CHIP_TOPAZ:
#ifdef CONFIG_DRM_AMDGPU_SI
case CHIP_HAINAN:

View File

@ -127,6 +127,17 @@ struct amdgpu_vm_tlb_seq_struct {
struct dma_fence_cb cb;
};
/**
* amdgpu_vm_assert_locked - check if VM is correctly locked
* @vm: the VM which should be tested
*
* Asserts that the VM root PD is locked.
*/
static void amdgpu_vm_assert_locked(struct amdgpu_vm *vm)
{
dma_resv_assert_held(vm->root.bo->tbo.base.resv);
}
/**
* amdgpu_vm_set_pasid - manage pasid and vm ptr mapping
*
@ -143,6 +154,8 @@ int amdgpu_vm_set_pasid(struct amdgpu_device *adev, struct amdgpu_vm *vm,
{
int r;
amdgpu_vm_assert_locked(vm);
if (vm->pasid == pasid)
return 0;
@ -181,12 +194,11 @@ static void amdgpu_vm_bo_evicted(struct amdgpu_vm_bo_base *vm_bo)
struct amdgpu_bo *bo = vm_bo->bo;
vm_bo->moved = true;
spin_lock(&vm_bo->vm->status_lock);
amdgpu_vm_assert_locked(vm);
if (bo->tbo.type == ttm_bo_type_kernel)
list_move(&vm_bo->vm_status, &vm->evicted);
else
list_move_tail(&vm_bo->vm_status, &vm->evicted);
spin_unlock(&vm_bo->vm->status_lock);
}
/**
* amdgpu_vm_bo_moved - vm_bo is moved
@ -198,9 +210,8 @@ static void amdgpu_vm_bo_evicted(struct amdgpu_vm_bo_base *vm_bo)
*/
static void amdgpu_vm_bo_moved(struct amdgpu_vm_bo_base *vm_bo)
{
spin_lock(&vm_bo->vm->status_lock);
amdgpu_vm_assert_locked(vm_bo->vm);
list_move(&vm_bo->vm_status, &vm_bo->vm->moved);
spin_unlock(&vm_bo->vm->status_lock);
}
/**
@ -213,9 +224,8 @@ static void amdgpu_vm_bo_moved(struct amdgpu_vm_bo_base *vm_bo)
*/
static void amdgpu_vm_bo_idle(struct amdgpu_vm_bo_base *vm_bo)
{
spin_lock(&vm_bo->vm->status_lock);
amdgpu_vm_assert_locked(vm_bo->vm);
list_move(&vm_bo->vm_status, &vm_bo->vm->idle);
spin_unlock(&vm_bo->vm->status_lock);
vm_bo->moved = false;
}
@ -229,9 +239,9 @@ static void amdgpu_vm_bo_idle(struct amdgpu_vm_bo_base *vm_bo)
*/
static void amdgpu_vm_bo_invalidated(struct amdgpu_vm_bo_base *vm_bo)
{
spin_lock(&vm_bo->vm->status_lock);
spin_lock(&vm_bo->vm->invalidated_lock);
list_move(&vm_bo->vm_status, &vm_bo->vm->invalidated);
spin_unlock(&vm_bo->vm->status_lock);
spin_unlock(&vm_bo->vm->invalidated_lock);
}
/**
@ -244,10 +254,9 @@ static void amdgpu_vm_bo_invalidated(struct amdgpu_vm_bo_base *vm_bo)
*/
static void amdgpu_vm_bo_evicted_user(struct amdgpu_vm_bo_base *vm_bo)
{
amdgpu_vm_assert_locked(vm_bo->vm);
vm_bo->moved = true;
spin_lock(&vm_bo->vm->status_lock);
list_move(&vm_bo->vm_status, &vm_bo->vm->evicted_user);
spin_unlock(&vm_bo->vm->status_lock);
}
/**
@ -260,13 +269,11 @@ static void amdgpu_vm_bo_evicted_user(struct amdgpu_vm_bo_base *vm_bo)
*/
static void amdgpu_vm_bo_relocated(struct amdgpu_vm_bo_base *vm_bo)
{
if (vm_bo->bo->parent) {
spin_lock(&vm_bo->vm->status_lock);
amdgpu_vm_assert_locked(vm_bo->vm);
if (vm_bo->bo->parent)
list_move(&vm_bo->vm_status, &vm_bo->vm->relocated);
spin_unlock(&vm_bo->vm->status_lock);
} else {
else
amdgpu_vm_bo_idle(vm_bo);
}
}
/**
@ -279,9 +286,8 @@ static void amdgpu_vm_bo_relocated(struct amdgpu_vm_bo_base *vm_bo)
*/
static void amdgpu_vm_bo_done(struct amdgpu_vm_bo_base *vm_bo)
{
spin_lock(&vm_bo->vm->status_lock);
amdgpu_vm_assert_locked(vm_bo->vm);
list_move(&vm_bo->vm_status, &vm_bo->vm->done);
spin_unlock(&vm_bo->vm->status_lock);
}
/**
@ -295,10 +301,13 @@ static void amdgpu_vm_bo_reset_state_machine(struct amdgpu_vm *vm)
{
struct amdgpu_vm_bo_base *vm_bo, *tmp;
spin_lock(&vm->status_lock);
spin_lock(&vm->invalidated_lock);
list_splice_init(&vm->done, &vm->invalidated);
list_for_each_entry(vm_bo, &vm->invalidated, vm_status)
vm_bo->moved = true;
spin_unlock(&vm->invalidated_lock);
amdgpu_vm_assert_locked(vm_bo->vm);
list_for_each_entry_safe(vm_bo, tmp, &vm->idle, vm_status) {
struct amdgpu_bo *bo = vm_bo->bo;
@ -308,14 +317,13 @@ static void amdgpu_vm_bo_reset_state_machine(struct amdgpu_vm *vm)
else if (bo->parent)
list_move(&vm_bo->vm_status, &vm_bo->vm->relocated);
}
spin_unlock(&vm->status_lock);
}
/**
* amdgpu_vm_update_shared - helper to update shared memory stat
* @base: base structure for tracking BO usage in a VM
*
* Takes the vm status_lock and updates the shared memory stat. If the basic
* Takes the vm stats_lock and updates the shared memory stat. If the basic
* stat changed (e.g. buffer was moved) amdgpu_vm_update_stats need to be called
* as well.
*/
@ -327,7 +335,8 @@ static void amdgpu_vm_update_shared(struct amdgpu_vm_bo_base *base)
uint32_t bo_memtype = amdgpu_bo_mem_stats_placement(bo);
bool shared;
spin_lock(&vm->status_lock);
dma_resv_assert_held(bo->tbo.base.resv);
spin_lock(&vm->stats_lock);
shared = drm_gem_object_is_shared_for_memory_stats(&bo->tbo.base);
if (base->shared != shared) {
base->shared = shared;
@ -339,7 +348,7 @@ static void amdgpu_vm_update_shared(struct amdgpu_vm_bo_base *base)
vm->stats[bo_memtype].drm.private += size;
}
}
spin_unlock(&vm->status_lock);
spin_unlock(&vm->stats_lock);
}
/**
@ -364,11 +373,11 @@ void amdgpu_vm_bo_update_shared(struct amdgpu_bo *bo)
* be bo->tbo.resource
* @sign: if we should add (+1) or subtract (-1) from the stat
*
* Caller need to have the vm status_lock held. Useful for when multiple update
* Caller need to have the vm stats_lock held. Useful for when multiple update
* need to happen at the same time.
*/
static void amdgpu_vm_update_stats_locked(struct amdgpu_vm_bo_base *base,
struct ttm_resource *res, int sign)
struct ttm_resource *res, int sign)
{
struct amdgpu_vm *vm = base->vm;
struct amdgpu_bo *bo = base->bo;
@ -392,7 +401,8 @@ static void amdgpu_vm_update_stats_locked(struct amdgpu_vm_bo_base *base,
*/
if (bo->flags & AMDGPU_GEM_CREATE_DISCARDABLE)
vm->stats[res_memtype].drm.purgeable += size;
if (!(bo->preferred_domains & amdgpu_mem_type_to_domain(res_memtype)))
if (!(bo->preferred_domains &
amdgpu_mem_type_to_domain(res_memtype)))
vm->stats[bo_memtype].evicted += size;
}
}
@ -411,9 +421,9 @@ void amdgpu_vm_update_stats(struct amdgpu_vm_bo_base *base,
{
struct amdgpu_vm *vm = base->vm;
spin_lock(&vm->status_lock);
spin_lock(&vm->stats_lock);
amdgpu_vm_update_stats_locked(base, res, sign);
spin_unlock(&vm->status_lock);
spin_unlock(&vm->stats_lock);
}
/**
@ -439,10 +449,10 @@ void amdgpu_vm_bo_base_init(struct amdgpu_vm_bo_base *base,
base->next = bo->vm_bo;
bo->vm_bo = base;
spin_lock(&vm->status_lock);
spin_lock(&vm->stats_lock);
base->shared = drm_gem_object_is_shared_for_memory_stats(&bo->tbo.base);
amdgpu_vm_update_stats_locked(base, bo->tbo.resource, +1);
spin_unlock(&vm->status_lock);
spin_unlock(&vm->stats_lock);
if (!amdgpu_vm_is_bo_always_valid(vm, bo))
return;
@ -484,6 +494,42 @@ int amdgpu_vm_lock_pd(struct amdgpu_vm *vm, struct drm_exec *exec,
2 + num_fences);
}
/**
* amdgpu_vm_lock_done_list - lock all BOs on the done list
* @vm: vm providing the BOs
* @exec: drm execution context
* @num_fences: number of extra fences to reserve
*
* Lock the BOs on the done list in the DRM execution context.
*/
int amdgpu_vm_lock_done_list(struct amdgpu_vm *vm, struct drm_exec *exec,
unsigned int num_fences)
{
struct list_head *prev = &vm->done;
struct amdgpu_bo_va *bo_va;
struct amdgpu_bo *bo;
int ret;
/* We can only trust prev->next while holding the lock */
spin_lock(&vm->invalidated_lock);
while (!list_is_head(prev->next, &vm->done)) {
bo_va = list_entry(prev->next, typeof(*bo_va), base.vm_status);
spin_unlock(&vm->invalidated_lock);
bo = bo_va->base.bo;
if (bo) {
ret = drm_exec_prepare_obj(exec, &bo->tbo.base, 1);
if (unlikely(ret))
return ret;
}
spin_lock(&vm->invalidated_lock);
prev = prev->next;
}
spin_unlock(&vm->invalidated_lock);
return 0;
}
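The helper above is meant to be driven from a drm_exec retry loop, like amdgpu_vm_lock_pd(); a minimal hypothetical sketch (not part of this commit, assuming the usual drm_exec flow) would be:

static int example_lock_vm_and_done(struct amdgpu_vm *vm)
{
	struct drm_exec exec;
	int r = 0;

	drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT, 0);
	drm_exec_until_all_locked(&exec) {
		/* Root PD first, then every BO currently on the done list */
		r = amdgpu_vm_lock_pd(vm, &exec, 0);
		if (!r)
			r = amdgpu_vm_lock_done_list(vm, &exec, 0);
		drm_exec_retry_on_contention(&exec);
		if (r)
			break;
	}
	drm_exec_fini(&exec);
	return r;
}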
/**
* amdgpu_vm_move_to_lru_tail - move all BOs to the end of LRU
*
@ -575,7 +621,7 @@ int amdgpu_vm_validate(struct amdgpu_device *adev, struct amdgpu_vm *vm,
void *param)
{
uint64_t new_vm_generation = amdgpu_vm_generation(adev, vm);
struct amdgpu_vm_bo_base *bo_base;
struct amdgpu_vm_bo_base *bo_base, *tmp;
struct amdgpu_bo *bo;
int r;
@ -588,13 +634,7 @@ int amdgpu_vm_validate(struct amdgpu_device *adev, struct amdgpu_vm *vm,
return r;
}
spin_lock(&vm->status_lock);
while (!list_empty(&vm->evicted)) {
bo_base = list_first_entry(&vm->evicted,
struct amdgpu_vm_bo_base,
vm_status);
spin_unlock(&vm->status_lock);
list_for_each_entry_safe(bo_base, tmp, &vm->evicted, vm_status) {
bo = bo_base->bo;
r = validate(param, bo);
@ -607,37 +647,21 @@ int amdgpu_vm_validate(struct amdgpu_device *adev, struct amdgpu_vm *vm,
vm->update_funcs->map_table(to_amdgpu_bo_vm(bo));
amdgpu_vm_bo_relocated(bo_base);
}
spin_lock(&vm->status_lock);
}
while (ticket && !list_empty(&vm->evicted_user)) {
bo_base = list_first_entry(&vm->evicted_user,
struct amdgpu_vm_bo_base,
vm_status);
spin_unlock(&vm->status_lock);
bo = bo_base->bo;
if (ticket) {
list_for_each_entry_safe(bo_base, tmp, &vm->evicted_user,
vm_status) {
bo = bo_base->bo;
dma_resv_assert_held(bo->tbo.base.resv);
if (dma_resv_locking_ctx(bo->tbo.base.resv) != ticket) {
struct amdgpu_task_info *ti = amdgpu_vm_get_task_info_vm(vm);
r = validate(param, bo);
if (r)
return r;
pr_warn_ratelimited("Evicted user BO is not reserved\n");
if (ti) {
pr_warn_ratelimited("pid %d\n", ti->task.pid);
amdgpu_vm_put_task_info(ti);
}
return -EINVAL;
amdgpu_vm_bo_invalidated(bo_base);
}
r = validate(param, bo);
if (r)
return r;
amdgpu_vm_bo_invalidated(bo_base);
spin_lock(&vm->status_lock);
}
spin_unlock(&vm->status_lock);
amdgpu_vm_eviction_lock(vm);
vm->evicting = false;
@ -660,13 +684,13 @@ bool amdgpu_vm_ready(struct amdgpu_vm *vm)
{
bool ret;
amdgpu_vm_assert_locked(vm);
amdgpu_vm_eviction_lock(vm);
ret = !vm->evicting;
amdgpu_vm_eviction_unlock(vm);
spin_lock(&vm->status_lock);
ret &= list_empty(&vm->evicted);
spin_unlock(&vm->status_lock);
spin_lock(&vm->immediate.lock);
ret &= !vm->immediate.stopped;
@ -957,16 +981,13 @@ int amdgpu_vm_update_pdes(struct amdgpu_device *adev,
struct amdgpu_vm *vm, bool immediate)
{
struct amdgpu_vm_update_params params;
struct amdgpu_vm_bo_base *entry;
struct amdgpu_vm_bo_base *entry, *tmp;
bool flush_tlb_needed = false;
LIST_HEAD(relocated);
int r, idx;
spin_lock(&vm->status_lock);
list_splice_init(&vm->relocated, &relocated);
spin_unlock(&vm->status_lock);
amdgpu_vm_assert_locked(vm);
if (list_empty(&relocated))
if (list_empty(&vm->relocated))
return 0;
if (!drm_dev_enter(adev_to_drm(adev), &idx))
@ -982,7 +1003,7 @@ int amdgpu_vm_update_pdes(struct amdgpu_device *adev,
if (r)
goto error;
list_for_each_entry(entry, &relocated, vm_status) {
list_for_each_entry(entry, &vm->relocated, vm_status) {
/* vm_flush_needed after updating moved PDEs */
flush_tlb_needed |= entry->moved;
@ -998,9 +1019,7 @@ int amdgpu_vm_update_pdes(struct amdgpu_device *adev,
if (flush_tlb_needed)
atomic64_inc(&vm->tlb_seq);
while (!list_empty(&relocated)) {
entry = list_first_entry(&relocated, struct amdgpu_vm_bo_base,
vm_status);
list_for_each_entry_safe(entry, tmp, &vm->relocated, vm_status) {
amdgpu_vm_bo_idle(entry);
}
@ -1227,9 +1246,9 @@ error_free:
void amdgpu_vm_get_memory(struct amdgpu_vm *vm,
struct amdgpu_mem_stats stats[__AMDGPU_PL_NUM])
{
spin_lock(&vm->status_lock);
spin_lock(&vm->stats_lock);
memcpy(stats, vm->stats, sizeof(*stats) * __AMDGPU_PL_NUM);
spin_unlock(&vm->status_lock);
spin_unlock(&vm->stats_lock);
}
/**
@ -1596,29 +1615,24 @@ int amdgpu_vm_handle_moved(struct amdgpu_device *adev,
struct amdgpu_vm *vm,
struct ww_acquire_ctx *ticket)
{
struct amdgpu_bo_va *bo_va;
struct amdgpu_bo_va *bo_va, *tmp;
struct dma_resv *resv;
bool clear, unlock;
int r;
spin_lock(&vm->status_lock);
while (!list_empty(&vm->moved)) {
bo_va = list_first_entry(&vm->moved, struct amdgpu_bo_va,
base.vm_status);
spin_unlock(&vm->status_lock);
list_for_each_entry_safe(bo_va, tmp, &vm->moved, base.vm_status) {
/* Per VM BOs never need to be cleared in the page tables */
r = amdgpu_vm_bo_update(adev, bo_va, false);
if (r)
return r;
spin_lock(&vm->status_lock);
}
spin_lock(&vm->invalidated_lock);
while (!list_empty(&vm->invalidated)) {
bo_va = list_first_entry(&vm->invalidated, struct amdgpu_bo_va,
base.vm_status);
resv = bo_va->base.bo->tbo.base.resv;
spin_unlock(&vm->status_lock);
spin_unlock(&vm->invalidated_lock);
/* Try to reserve the BO to avoid clearing its ptes */
if (!adev->debug_vm && dma_resv_trylock(resv)) {
@ -1650,9 +1664,9 @@ int amdgpu_vm_handle_moved(struct amdgpu_device *adev,
bo_va->base.bo->tbo.resource->mem_type == TTM_PL_SYSTEM))
amdgpu_vm_bo_evicted_user(&bo_va->base);
spin_lock(&vm->status_lock);
spin_lock(&vm->invalidated_lock);
}
spin_unlock(&vm->status_lock);
spin_unlock(&vm->invalidated_lock);
return 0;
}
@ -2181,9 +2195,9 @@ void amdgpu_vm_bo_del(struct amdgpu_device *adev,
}
}
spin_lock(&vm->status_lock);
spin_lock(&vm->invalidated_lock);
list_del(&bo_va->base.vm_status);
spin_unlock(&vm->status_lock);
spin_unlock(&vm->invalidated_lock);
list_for_each_entry_safe(mapping, next, &bo_va->valids, list) {
list_del(&mapping->list);
@ -2291,10 +2305,10 @@ void amdgpu_vm_bo_move(struct amdgpu_bo *bo, struct ttm_resource *new_mem,
for (bo_base = bo->vm_bo; bo_base; bo_base = bo_base->next) {
struct amdgpu_vm *vm = bo_base->vm;
spin_lock(&vm->status_lock);
spin_lock(&vm->stats_lock);
amdgpu_vm_update_stats_locked(bo_base, bo->tbo.resource, -1);
amdgpu_vm_update_stats_locked(bo_base, new_mem, +1);
spin_unlock(&vm->status_lock);
spin_unlock(&vm->stats_lock);
}
amdgpu_vm_bo_invalidate(bo, evicted);
@ -2561,11 +2575,12 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm,
INIT_LIST_HEAD(&vm->relocated);
INIT_LIST_HEAD(&vm->moved);
INIT_LIST_HEAD(&vm->idle);
spin_lock_init(&vm->invalidated_lock);
INIT_LIST_HEAD(&vm->invalidated);
spin_lock_init(&vm->status_lock);
INIT_LIST_HEAD(&vm->freed);
INIT_LIST_HEAD(&vm->done);
INIT_KFIFO(vm->faults);
spin_lock_init(&vm->stats_lock);
r = amdgpu_vm_init_entities(adev, vm);
if (r)
@ -3030,7 +3045,8 @@ void amdgpu_debugfs_vm_bo_info(struct amdgpu_vm *vm, struct seq_file *m)
unsigned int total_done_objs = 0;
unsigned int id = 0;
spin_lock(&vm->status_lock);
amdgpu_vm_assert_locked(vm);
seq_puts(m, "\tIdle BOs:\n");
list_for_each_entry_safe(bo_va, tmp, &vm->idle, base.vm_status) {
if (!bo_va->base.bo)
@ -3068,11 +3084,13 @@ void amdgpu_debugfs_vm_bo_info(struct amdgpu_vm *vm, struct seq_file *m)
id = 0;
seq_puts(m, "\tInvalidated BOs:\n");
spin_lock(&vm->invalidated_lock);
list_for_each_entry_safe(bo_va, tmp, &vm->invalidated, base.vm_status) {
if (!bo_va->base.bo)
continue;
total_invalidated += amdgpu_bo_print_info(id++, bo_va->base.bo, m);
}
spin_unlock(&vm->invalidated_lock);
total_invalidated_objs = id;
id = 0;
@ -3082,7 +3100,6 @@ void amdgpu_debugfs_vm_bo_info(struct amdgpu_vm *vm, struct seq_file *m)
continue;
total_done += amdgpu_bo_print_info(id++, bo_va->base.bo, m);
}
spin_unlock(&vm->status_lock);
total_done_objs = id;
seq_printf(m, "\tTotal idle size: %12lld\tobjs:\t%d\n", total_idle,

View File

@ -203,11 +203,11 @@ struct amdgpu_vm_bo_base {
/* protected by bo being reserved */
struct amdgpu_vm_bo_base *next;
/* protected by vm status_lock */
/* protected by vm reservation and invalidated_lock */
struct list_head vm_status;
/* if the bo is counted as shared in mem stats
* protected by vm status_lock */
* protected by vm BO being reserved */
bool shared;
/* protected by the BO being reserved */
@ -343,18 +343,22 @@ struct amdgpu_vm {
bool evicting;
unsigned int saved_flags;
/* Lock to protect vm_bo add/del/move on all lists of vm */
spinlock_t status_lock;
/* Memory statistics for this vm, protected by status_lock */
/* Memory statistics for this vm, protected by stats_lock */
spinlock_t stats_lock;
struct amdgpu_mem_stats stats[__AMDGPU_PL_NUM];
/*
* The following lists contain amdgpu_vm_bo_base objects for either
* PDs, PTs or per VM BOs. The state transitions are:
*
* evicted -> relocated (PDs, PTs) or moved (per VM BOs) -> idle
*
* Lists are protected by the root PD dma_resv lock.
*/
/* Per-VM and PT BOs who needs a validation */
struct list_head evicted;
/* BOs for user mode queues that need a validation */
struct list_head evicted_user;
/* PT BOs which relocated and their parent need an update */
struct list_head relocated;
@ -364,15 +368,32 @@ struct amdgpu_vm {
/* All BOs of this VM not currently in the state machine */
struct list_head idle;
/*
* The following lists contain amdgpu_vm_bo_base objects for BOs which
* have their own dma_resv object and do not depend on the root PD. Their
* state transitions are:
*
* evicted_user or invalidated -> done
*
* Lists are protected by the invalidated_lock.
*/
spinlock_t invalidated_lock;
/* BOs for user mode queues that need a validation */
struct list_head evicted_user;
/* regular invalidated BOs, but not yet updated in the PT */
struct list_head invalidated;
/* BO mappings freed, but not yet updated in the PT */
struct list_head freed;
/* BOs which were invalidated and have already been updated in the PTs */
struct list_head done;
/*
* This list contains amdgpu_bo_va_mapping objects which have been freed
* but not updated in the PTs
*/
struct list_head freed;
/* contains the page directory */
struct amdgpu_vm_bo_base root;
struct dma_fence *last_update;
@ -491,6 +512,8 @@ int amdgpu_vm_make_compute(struct amdgpu_device *adev, struct amdgpu_vm *vm);
void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm);
int amdgpu_vm_lock_pd(struct amdgpu_vm *vm, struct drm_exec *exec,
unsigned int num_fences);
int amdgpu_vm_lock_done_list(struct amdgpu_vm *vm, struct drm_exec *exec,
unsigned int num_fences);
bool amdgpu_vm_ready(struct amdgpu_vm *vm);
uint64_t amdgpu_vm_generation(struct amdgpu_device *adev, struct amdgpu_vm *vm);
int amdgpu_vm_validate(struct amdgpu_device *adev, struct amdgpu_vm *vm,

View File

@ -543,9 +543,7 @@ static void amdgpu_vm_pt_free(struct amdgpu_vm_bo_base *entry)
entry->bo->vm_bo = NULL;
ttm_bo_set_bulk_move(&entry->bo->tbo, NULL);
spin_lock(&entry->vm->status_lock);
list_del(&entry->vm_status);
spin_unlock(&entry->vm->status_lock);
amdgpu_bo_unref(&entry->bo);
}
@ -589,7 +587,6 @@ static void amdgpu_vm_pt_add_list(struct amdgpu_vm_update_params *params,
struct amdgpu_vm_pt_cursor seek;
struct amdgpu_vm_bo_base *entry;
spin_lock(&params->vm->status_lock);
for_each_amdgpu_vm_pt_dfs_safe(params->adev, params->vm, cursor, seek, entry) {
if (entry && entry->bo)
list_move(&entry->vm_status, &params->tlb_flush_waitlist);
@ -597,7 +594,6 @@ static void amdgpu_vm_pt_add_list(struct amdgpu_vm_update_params *params,
/* enter start node now */
list_move(&cursor->entry->vm_status, &params->tlb_flush_waitlist);
spin_unlock(&params->vm->status_lock);
}
/**

View File

@ -425,45 +425,6 @@ out:
return ret;
}
static void amdgpu_dummy_vram_mgr_debug(struct ttm_resource_manager *man,
struct drm_printer *printer)
{
DRM_DEBUG_DRIVER("Dummy vram mgr debug\n");
}
static bool amdgpu_dummy_vram_mgr_compatible(struct ttm_resource_manager *man,
struct ttm_resource *res,
const struct ttm_place *place,
size_t size)
{
DRM_DEBUG_DRIVER("Dummy vram mgr compatible\n");
return false;
}
static bool amdgpu_dummy_vram_mgr_intersects(struct ttm_resource_manager *man,
struct ttm_resource *res,
const struct ttm_place *place,
size_t size)
{
DRM_DEBUG_DRIVER("Dummy vram mgr intersects\n");
return true;
}
static void amdgpu_dummy_vram_mgr_del(struct ttm_resource_manager *man,
struct ttm_resource *res)
{
DRM_DEBUG_DRIVER("Dummy vram mgr deleted\n");
}
static int amdgpu_dummy_vram_mgr_new(struct ttm_resource_manager *man,
struct ttm_buffer_object *tbo,
const struct ttm_place *place,
struct ttm_resource **res)
{
DRM_DEBUG_DRIVER("Dummy vram mgr new\n");
return -ENOSPC;
}
/**
* amdgpu_vram_mgr_new - allocate new ranges
*
@ -932,14 +893,6 @@ static void amdgpu_vram_mgr_debug(struct ttm_resource_manager *man,
mutex_unlock(&mgr->lock);
}
static const struct ttm_resource_manager_func amdgpu_dummy_vram_mgr_func = {
.alloc = amdgpu_dummy_vram_mgr_new,
.free = amdgpu_dummy_vram_mgr_del,
.intersects = amdgpu_dummy_vram_mgr_intersects,
.compatible = amdgpu_dummy_vram_mgr_compatible,
.debug = amdgpu_dummy_vram_mgr_debug
};
static const struct ttm_resource_manager_func amdgpu_vram_mgr_func = {
.alloc = amdgpu_vram_mgr_new,
.free = amdgpu_vram_mgr_del,
@ -973,16 +926,10 @@ int amdgpu_vram_mgr_init(struct amdgpu_device *adev)
INIT_LIST_HEAD(&mgr->allocated_vres_list);
mgr->default_page_size = PAGE_SIZE;
if (!adev->gmc.is_app_apu) {
man->func = &amdgpu_vram_mgr_func;
err = drm_buddy_init(&mgr->mm, man->size, PAGE_SIZE);
if (err)
return err;
} else {
man->func = &amdgpu_dummy_vram_mgr_func;
DRM_INFO("Setup dummy vram mgr\n");
}
man->func = &amdgpu_vram_mgr_func;
err = drm_buddy_init(&mgr->mm, man->size, PAGE_SIZE);
if (err)
return err;
ttm_set_driver_manager(&adev->mman.bdev, TTM_PL_VRAM, &mgr->manager);
ttm_resource_manager_set_used(man, true);

View File

@ -126,4 +126,8 @@ uint32_t amdgpu_xgmi_get_max_bandwidth(struct amdgpu_device *adev);
void amgpu_xgmi_set_max_speed_width(struct amdgpu_device *adev,
uint16_t max_speed, uint8_t max_width);
/* Cleanup macro for use with __free(xgmi_put_hive) */
DEFINE_FREE(xgmi_put_hive, struct amdgpu_hive_info *, if (_T) amdgpu_put_xgmi_hive(_T))
#endif

View File

@ -405,12 +405,17 @@ struct amd_sriov_ras_cper_dump {
uint32_t buf[];
};
struct amd_sriov_ras_chk_criti {
uint32_t hit;
};
struct amdsriov_ras_telemetry {
struct amd_sriov_ras_telemetry_header header;
union {
struct amd_sriov_ras_telemetry_error_count error_count;
struct amd_sriov_ras_cper_dump cper_dump;
struct amd_sriov_ras_chk_criti chk_criti;
} body;
};

View File

@ -1246,6 +1246,10 @@ static int amdgpu_atom_execute_table_locked(struct atom_context *ctx, int index,
ectx.last_jump_jiffies = 0;
if (ws) {
ectx.ws = kcalloc(4, ws, GFP_KERNEL);
if (!ectx.ws) {
ret = -ENOMEM;
goto free;
}
ectx.ws_size = ws;
} else {
ectx.ws = NULL;

File diff suppressed because it is too large

View File

@ -1,32 +0,0 @@
/*
* Copyright 2014 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
*/
#ifndef __DCE_V11_0_H__
#define __DCE_V11_0_H__
extern const struct amdgpu_ip_block_version dce_v11_0_ip_block;
extern const struct amdgpu_ip_block_version dce_v11_2_ip_block;
void dce_v11_0_disable_dce(struct amdgpu_device *adev);
#endif

View File

@ -4075,7 +4075,7 @@ static int gfx_v10_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
struct dma_fence *f = NULL;
unsigned int index;
uint64_t gpu_addr;
volatile uint32_t *cpu_ptr;
uint32_t *cpu_ptr;
long r;
memset(&ib, 0, sizeof(ib));
@ -4322,8 +4322,7 @@ static u32 gfx_v10_0_get_csb_size(struct amdgpu_device *adev)
return count;
}
static void gfx_v10_0_get_csb_buffer(struct amdgpu_device *adev,
volatile u32 *buffer)
static void gfx_v10_0_get_csb_buffer(struct amdgpu_device *adev, u32 *buffer)
{
u32 count = 0;
int ctx_reg_offset;

View File

@ -603,7 +603,7 @@ static int gfx_v11_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
struct dma_fence *f = NULL;
unsigned index;
uint64_t gpu_addr;
volatile uint32_t *cpu_ptr;
uint32_t *cpu_ptr;
long r;
/* MES KIQ fw hasn't indirect buffer support for now */
@ -850,8 +850,7 @@ static u32 gfx_v11_0_get_csb_size(struct amdgpu_device *adev)
return count;
}
static void gfx_v11_0_get_csb_buffer(struct amdgpu_device *adev,
volatile u32 *buffer)
static void gfx_v11_0_get_csb_buffer(struct amdgpu_device *adev, u32 *buffer)
{
u32 count = 0;
int ctx_reg_offset;
@ -1654,6 +1653,21 @@ static int gfx_v11_0_sw_init(struct amdgpu_ip_block *ip_block)
}
}
break;
case IP_VERSION(11, 0, 1):
case IP_VERSION(11, 0, 4):
adev->gfx.cleaner_shader_ptr = gfx_11_0_3_cleaner_shader_hex;
adev->gfx.cleaner_shader_size = sizeof(gfx_11_0_3_cleaner_shader_hex);
if (adev->gfx.pfp_fw_version >= 102 &&
adev->gfx.mec_fw_version >= 66 &&
adev->mes.fw_version[0] >= 128) {
adev->gfx.enable_cleaner_shader = true;
r = amdgpu_gfx_cleaner_shader_sw_init(adev, adev->gfx.cleaner_shader_size);
if (r) {
adev->gfx.enable_cleaner_shader = false;
dev_err(adev->dev, "Failed to initialize cleaner shader\n");
}
}
break;
case IP_VERSION(11, 5, 0):
case IP_VERSION(11, 5, 1):
adev->gfx.cleaner_shader_ptr = gfx_11_0_3_cleaner_shader_hex;

View File

@ -497,7 +497,7 @@ static int gfx_v12_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
struct dma_fence *f = NULL;
unsigned index;
uint64_t gpu_addr;
volatile uint32_t *cpu_ptr;
uint32_t *cpu_ptr;
long r;
/* MES KIQ fw hasn't indirect buffer support for now */
@ -685,8 +685,7 @@ static u32 gfx_v12_0_get_csb_size(struct amdgpu_device *adev)
return count;
}
static void gfx_v12_0_get_csb_buffer(struct amdgpu_device *adev,
volatile u32 *buffer)
static void gfx_v12_0_get_csb_buffer(struct amdgpu_device *adev, u32 *buffer)
{
u32 count = 0, clustercount = 0, i;
const struct cs_section_def *sect = NULL;

View File

@ -86,7 +86,7 @@ MODULE_FIRMWARE("amdgpu/hainan_ce.bin");
MODULE_FIRMWARE("amdgpu/hainan_rlc.bin");
static u32 gfx_v6_0_get_csb_size(struct amdgpu_device *adev);
static void gfx_v6_0_get_csb_buffer(struct amdgpu_device *adev, volatile u32 *buffer);
static void gfx_v6_0_get_csb_buffer(struct amdgpu_device *adev, u32 *buffer);
//static void gfx_v6_0_init_cp_pg_table(struct amdgpu_device *adev);
static void gfx_v6_0_init_pg(struct amdgpu_device *adev);
@ -2354,7 +2354,7 @@ static void gfx_v6_0_ring_emit_wreg(struct amdgpu_ring *ring,
static int gfx_v6_0_rlc_init(struct amdgpu_device *adev)
{
const u32 *src_ptr;
volatile u32 *dst_ptr;
u32 *dst_ptr;
u32 dws;
u64 reg_list_mc_addr;
const struct cs_section_def *cs_data;
@ -2855,8 +2855,7 @@ static u32 gfx_v6_0_get_csb_size(struct amdgpu_device *adev)
return count;
}
static void gfx_v6_0_get_csb_buffer(struct amdgpu_device *adev,
volatile u32 *buffer)
static void gfx_v6_0_get_csb_buffer(struct amdgpu_device *adev, u32 *buffer)
{
u32 count = 0;

View File

@ -883,7 +883,7 @@ static const u32 kalindi_rlc_save_restore_register_list[] = {
};
static u32 gfx_v7_0_get_csb_size(struct amdgpu_device *adev);
static void gfx_v7_0_get_csb_buffer(struct amdgpu_device *adev, volatile u32 *buffer);
static void gfx_v7_0_get_csb_buffer(struct amdgpu_device *adev, u32 *buffer);
static void gfx_v7_0_init_pg(struct amdgpu_device *adev);
static void gfx_v7_0_get_cu_info(struct amdgpu_device *adev);
@ -3882,8 +3882,7 @@ static u32 gfx_v7_0_get_csb_size(struct amdgpu_device *adev)
return count;
}
static void gfx_v7_0_get_csb_buffer(struct amdgpu_device *adev,
volatile u32 *buffer)
static void gfx_v7_0_get_csb_buffer(struct amdgpu_device *adev, u32 *buffer)
{
u32 count = 0;

View File

@ -1220,8 +1220,7 @@ out:
return err;
}
static void gfx_v8_0_get_csb_buffer(struct amdgpu_device *adev,
volatile u32 *buffer)
static void gfx_v8_0_get_csb_buffer(struct amdgpu_device *adev, u32 *buffer)
{
u32 count = 0;

View File

@ -1648,8 +1648,7 @@ static u32 gfx_v9_0_get_csb_size(struct amdgpu_device *adev)
return count;
}
static void gfx_v9_0_get_csb_buffer(struct amdgpu_device *adev,
volatile u32 *buffer)
static void gfx_v9_0_get_csb_buffer(struct amdgpu_device *adev, u32 *buffer)
{
u32 count = 0;

View File

@ -337,7 +337,7 @@ static void gmc_v12_0_flush_gpu_tlb_pasid(struct amdgpu_device *adev,
int vmid, i;
if (adev->enable_uni_mes && adev->mes.ring[AMDGPU_MES_SCHED_PIPE].sched.ready &&
(adev->mes.sched_version & AMDGPU_MES_VERSION_MASK) >= 0x81) {
(adev->mes.sched_version & AMDGPU_MES_VERSION_MASK) >= 0x83) {
struct mes_inv_tlbs_pasid_input input = {0};
input.pasid = pasid;
input.flush_type = flush_type;
@ -521,6 +521,7 @@ static void gmc_v12_0_get_vm_pte(struct amdgpu_device *adev,
*flags &= ~AMDGPU_PTE_NOALLOC;
if (vm_flags & AMDGPU_VM_PAGE_PRT) {
*flags |= AMDGPU_PTE_PRT_GFX12;
*flags |= AMDGPU_PTE_SNOOPED;
*flags |= AMDGPU_PTE_SYSTEM;
*flags |= AMDGPU_PTE_IS_PTE;

View File

@ -1834,11 +1834,19 @@ static void gmc_v9_0_save_registers(struct amdgpu_device *adev)
static void gmc_v9_4_3_init_vram_info(struct amdgpu_device *adev)
{
static const u32 regBIF_BIOS_SCRATCH_4 = 0x50;
u32 vram_info;
adev->gmc.vram_type = AMDGPU_VRAM_TYPE_HBM;
adev->gmc.vram_width = 128 * 64;
if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 5, 0))
adev->gmc.vram_type = AMDGPU_VRAM_TYPE_HBM3E;
if (!(adev->flags & AMD_IS_APU) && !amdgpu_sriov_vf(adev)) {
vram_info = RREG32(regBIF_BIOS_SCRATCH_4);
adev->gmc.vram_vendor = vram_info & 0xF;
}
}
static int gmc_v9_0_sw_init(struct amdgpu_ip_block *ip_block)

View File

@ -557,7 +557,7 @@ static const struct amdgpu_ring_funcs jpeg_v1_0_decode_ring_vm_funcs = {
.nop = PACKET0(0x81ff, 0),
.support_64bit_ptrs = false,
.no_user_fence = true,
.extra_dw = 64,
.extra_bytes = 256,
.get_rptr = jpeg_v1_0_decode_ring_get_rptr,
.get_wptr = jpeg_v1_0_decode_ring_get_wptr,
.set_wptr = jpeg_v1_0_decode_ring_set_wptr,

View File

@ -23,7 +23,6 @@
#include "amdgpu.h"
#include "amdgpu_jpeg.h"
#include "amdgpu_cs.h"
#include "amdgpu_pm.h"
#include "soc15.h"
#include "soc15d.h"
@ -806,7 +805,7 @@ static const struct amdgpu_ring_funcs jpeg_v2_0_dec_ring_vm_funcs = {
.get_rptr = jpeg_v2_0_dec_ring_get_rptr,
.get_wptr = jpeg_v2_0_dec_ring_get_wptr,
.set_wptr = jpeg_v2_0_dec_ring_set_wptr,
.parse_cs = jpeg_v2_dec_ring_parse_cs,
.parse_cs = amdgpu_jpeg_dec_parse_cs,
.emit_frame_size =
SOC15_FLUSH_GPU_TLB_NUM_WREG * 6 +
SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 8 +
@ -854,58 +853,3 @@ const struct amdgpu_ip_block_version jpeg_v2_0_ip_block = {
.rev = 0,
.funcs = &jpeg_v2_0_ip_funcs,
};
/**
* jpeg_v2_dec_ring_parse_cs - command submission parser
*
* @parser: Command submission parser context
* @job: the job to parse
* @ib: the IB to parse
*
* Parse the command stream, return -EINVAL for invalid packet,
* 0 otherwise
*/
int jpeg_v2_dec_ring_parse_cs(struct amdgpu_cs_parser *parser,
struct amdgpu_job *job,
struct amdgpu_ib *ib)
{
u32 i, reg, res, cond, type;
struct amdgpu_device *adev = parser->adev;
for (i = 0; i < ib->length_dw ; i += 2) {
reg = CP_PACKETJ_GET_REG(ib->ptr[i]);
res = CP_PACKETJ_GET_RES(ib->ptr[i]);
cond = CP_PACKETJ_GET_COND(ib->ptr[i]);
type = CP_PACKETJ_GET_TYPE(ib->ptr[i]);
if (res) /* only support 0 at the moment */
return -EINVAL;
switch (type) {
case PACKETJ_TYPE0:
if (cond != PACKETJ_CONDITION_CHECK0 || reg < JPEG_REG_RANGE_START ||
reg > JPEG_REG_RANGE_END) {
dev_err(adev->dev, "Invalid packet [0x%08x]!\n", ib->ptr[i]);
return -EINVAL;
}
break;
case PACKETJ_TYPE3:
if (cond != PACKETJ_CONDITION_CHECK3 || reg < JPEG_REG_RANGE_START ||
reg > JPEG_REG_RANGE_END) {
dev_err(adev->dev, "Invalid packet [0x%08x]!\n", ib->ptr[i]);
return -EINVAL;
}
break;
case PACKETJ_TYPE6:
if (ib->ptr[i] == CP_PACKETJ_NOP)
continue;
dev_err(adev->dev, "Invalid packet [0x%08x]!\n", ib->ptr[i]);
return -EINVAL;
default:
dev_err(adev->dev, "Unknown packet type %d !\n", type);
return -EINVAL;
}
}
return 0;
}

View File

@ -45,9 +45,6 @@
#define JRBC_DEC_EXTERNAL_REG_WRITE_ADDR 0x18000
#define JPEG_REG_RANGE_START 0x4000
#define JPEG_REG_RANGE_END 0x41c2
void jpeg_v2_0_dec_ring_insert_start(struct amdgpu_ring *ring);
void jpeg_v2_0_dec_ring_insert_end(struct amdgpu_ring *ring);
void jpeg_v2_0_dec_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq,
@ -60,9 +57,6 @@ void jpeg_v2_0_dec_ring_emit_vm_flush(struct amdgpu_ring *ring,
unsigned vmid, uint64_t pd_addr);
void jpeg_v2_0_dec_ring_emit_wreg(struct amdgpu_ring *ring, uint32_t reg, uint32_t val);
void jpeg_v2_0_dec_ring_nop(struct amdgpu_ring *ring, uint32_t count);
int jpeg_v2_dec_ring_parse_cs(struct amdgpu_cs_parser *parser,
struct amdgpu_job *job,
struct amdgpu_ib *ib);
extern const struct amdgpu_ip_block_version jpeg_v2_0_ip_block;

View File

@ -696,7 +696,7 @@ static const struct amdgpu_ring_funcs jpeg_v2_5_dec_ring_vm_funcs = {
.get_rptr = jpeg_v2_5_dec_ring_get_rptr,
.get_wptr = jpeg_v2_5_dec_ring_get_wptr,
.set_wptr = jpeg_v2_5_dec_ring_set_wptr,
.parse_cs = jpeg_v2_dec_ring_parse_cs,
.parse_cs = amdgpu_jpeg_dec_parse_cs,
.emit_frame_size =
SOC15_FLUSH_GPU_TLB_NUM_WREG * 6 +
SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 8 +
@ -727,7 +727,7 @@ static const struct amdgpu_ring_funcs jpeg_v2_6_dec_ring_vm_funcs = {
.get_rptr = jpeg_v2_5_dec_ring_get_rptr,
.get_wptr = jpeg_v2_5_dec_ring_get_wptr,
.set_wptr = jpeg_v2_5_dec_ring_set_wptr,
.parse_cs = jpeg_v2_dec_ring_parse_cs,
.parse_cs = amdgpu_jpeg_dec_parse_cs,
.emit_frame_size =
SOC15_FLUSH_GPU_TLB_NUM_WREG * 6 +
SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 8 +

View File

@ -597,7 +597,7 @@ static const struct amdgpu_ring_funcs jpeg_v3_0_dec_ring_vm_funcs = {
.get_rptr = jpeg_v3_0_dec_ring_get_rptr,
.get_wptr = jpeg_v3_0_dec_ring_get_wptr,
.set_wptr = jpeg_v3_0_dec_ring_set_wptr,
.parse_cs = jpeg_v2_dec_ring_parse_cs,
.parse_cs = amdgpu_jpeg_dec_parse_cs,
.emit_frame_size =
SOC15_FLUSH_GPU_TLB_NUM_WREG * 6 +
SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 8 +

View File

@ -762,7 +762,7 @@ static const struct amdgpu_ring_funcs jpeg_v4_0_dec_ring_vm_funcs = {
.get_rptr = jpeg_v4_0_dec_ring_get_rptr,
.get_wptr = jpeg_v4_0_dec_ring_get_wptr,
.set_wptr = jpeg_v4_0_dec_ring_set_wptr,
.parse_cs = jpeg_v2_dec_ring_parse_cs,
.parse_cs = amdgpu_jpeg_dec_parse_cs,
.emit_frame_size =
SOC15_FLUSH_GPU_TLB_NUM_WREG * 6 +
SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 8 +

View File

@ -1177,7 +1177,7 @@ static const struct amdgpu_ring_funcs jpeg_v4_0_3_dec_ring_vm_funcs = {
.get_rptr = jpeg_v4_0_3_dec_ring_get_rptr,
.get_wptr = jpeg_v4_0_3_dec_ring_get_wptr,
.set_wptr = jpeg_v4_0_3_dec_ring_set_wptr,
.parse_cs = jpeg_v2_dec_ring_parse_cs,
.parse_cs = amdgpu_jpeg_dec_parse_cs,
.emit_frame_size =
SOC15_FLUSH_GPU_TLB_NUM_WREG * 6 +
SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 8 +

View File

@ -807,7 +807,7 @@ static const struct amdgpu_ring_funcs jpeg_v4_0_5_dec_ring_vm_funcs = {
.get_rptr = jpeg_v4_0_5_dec_ring_get_rptr,
.get_wptr = jpeg_v4_0_5_dec_ring_get_wptr,
.set_wptr = jpeg_v4_0_5_dec_ring_set_wptr,
.parse_cs = jpeg_v2_dec_ring_parse_cs,
.parse_cs = amdgpu_jpeg_dec_parse_cs,
.emit_frame_size =
SOC15_FLUSH_GPU_TLB_NUM_WREG * 6 +
SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 8 +

View File

@ -683,7 +683,7 @@ static const struct amdgpu_ring_funcs jpeg_v5_0_0_dec_ring_vm_funcs = {
.get_rptr = jpeg_v5_0_0_dec_ring_get_rptr,
.get_wptr = jpeg_v5_0_0_dec_ring_get_wptr,
.set_wptr = jpeg_v5_0_0_dec_ring_set_wptr,
.parse_cs = jpeg_v2_dec_ring_parse_cs,
.parse_cs = amdgpu_jpeg_dec_parse_cs,
.emit_frame_size =
SOC15_FLUSH_GPU_TLB_NUM_WREG * 6 +
SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 8 +

View File

@ -254,6 +254,7 @@ static int mes_userq_mqd_create(struct amdgpu_userq_mgr *uq_mgr,
struct amdgpu_mqd *mqd_hw_default = &adev->mqds[queue->queue_type];
struct drm_amdgpu_userq_in *mqd_user = args_in;
struct amdgpu_mqd_prop *userq_props;
struct amdgpu_gfx_shadow_info shadow_info;
int r;
/* Structure to initialize MQD for userqueue using generic MQD init function */
@ -263,13 +264,6 @@ static int mes_userq_mqd_create(struct amdgpu_userq_mgr *uq_mgr,
return -ENOMEM;
}
if (!mqd_user->wptr_va || !mqd_user->rptr_va ||
!mqd_user->queue_va || mqd_user->queue_size == 0) {
DRM_ERROR("Invalid MQD parameters for userqueue\n");
r = -EINVAL;
goto free_props;
}
r = amdgpu_userq_create_object(uq_mgr, &queue->mqd, mqd_hw_default->mqd_size);
if (r) {
DRM_ERROR("Failed to create MQD object for userqueue\n");
@ -286,6 +280,8 @@ static int mes_userq_mqd_create(struct amdgpu_userq_mgr *uq_mgr,
userq_props->doorbell_index = queue->doorbell_index;
userq_props->fence_address = queue->fence_drv->gpu_addr;
if (adev->gfx.funcs->get_gfx_shadow_info)
adev->gfx.funcs->get_gfx_shadow_info(adev, &shadow_info, true);
if (queue->queue_type == AMDGPU_HW_IP_COMPUTE) {
struct drm_amdgpu_userq_mqd_compute_gfx11 *compute_mqd;
@ -302,6 +298,10 @@ static int mes_userq_mqd_create(struct amdgpu_userq_mgr *uq_mgr,
goto free_mqd;
}
if (amdgpu_userq_input_va_validate(queue->vm, compute_mqd->eop_va,
max_t(u32, PAGE_SIZE, AMDGPU_GPU_PAGE_SIZE)))
goto free_mqd;
userq_props->eop_gpu_addr = compute_mqd->eop_va;
userq_props->hqd_pipe_priority = AMDGPU_GFX_PIPE_PRIO_NORMAL;
userq_props->hqd_queue_priority = AMDGPU_GFX_QUEUE_PRIORITY_MINIMUM;
@ -329,6 +329,11 @@ static int mes_userq_mqd_create(struct amdgpu_userq_mgr *uq_mgr,
userq_props->csa_addr = mqd_gfx_v11->csa_va;
userq_props->tmz_queue =
mqd_user->flags & AMDGPU_USERQ_CREATE_FLAGS_QUEUE_SECURE;
if (amdgpu_userq_input_va_validate(queue->vm, mqd_gfx_v11->shadow_va,
shadow_info.shadow_size))
goto free_mqd;
kfree(mqd_gfx_v11);
} else if (queue->queue_type == AMDGPU_HW_IP_DMA) {
struct drm_amdgpu_userq_mqd_sdma_gfx11 *mqd_sdma_v11;
@ -346,6 +351,10 @@ static int mes_userq_mqd_create(struct amdgpu_userq_mgr *uq_mgr,
goto free_mqd;
}
if (amdgpu_userq_input_va_validate(queue->vm, mqd_sdma_v11->csa_va,
shadow_info.csa_size))
goto free_mqd;
userq_props->csa_addr = mqd_sdma_v11->csa_va;
kfree(mqd_sdma_v11);
}
@ -395,10 +404,82 @@ mes_userq_mqd_destroy(struct amdgpu_userq_mgr *uq_mgr,
amdgpu_userq_destroy_object(uq_mgr, &queue->mqd);
}
static int mes_userq_preempt(struct amdgpu_userq_mgr *uq_mgr,
struct amdgpu_usermode_queue *queue)
{
struct amdgpu_device *adev = uq_mgr->adev;
struct mes_suspend_gang_input queue_input;
struct amdgpu_userq_obj *ctx = &queue->fw_obj;
signed long timeout = 2100000; /* 2100 ms */
u64 fence_gpu_addr;
u32 fence_offset;
u64 *fence_ptr;
int i, r;
if (queue->state != AMDGPU_USERQ_STATE_MAPPED)
return 0;
r = amdgpu_device_wb_get(adev, &fence_offset);
if (r)
return r;
fence_gpu_addr = adev->wb.gpu_addr + (fence_offset * 4);
fence_ptr = (u64 *)&adev->wb.wb[fence_offset];
*fence_ptr = 0;
memset(&queue_input, 0x0, sizeof(struct mes_suspend_gang_input));
queue_input.gang_context_addr = ctx->gpu_addr + AMDGPU_USERQ_PROC_CTX_SZ;
queue_input.suspend_fence_addr = fence_gpu_addr;
queue_input.suspend_fence_value = 1;
amdgpu_mes_lock(&adev->mes);
r = adev->mes.funcs->suspend_gang(&adev->mes, &queue_input);
amdgpu_mes_unlock(&adev->mes);
if (r) {
DRM_ERROR("Failed to suspend gang: %d\n", r);
goto out;
}
for (i = 0; i < timeout; i++) {
if (*fence_ptr == 1)
goto out;
udelay(1);
}
r = -ETIMEDOUT;
out:
amdgpu_device_wb_free(adev, fence_offset);
return r;
}
static int mes_userq_restore(struct amdgpu_userq_mgr *uq_mgr,
struct amdgpu_usermode_queue *queue)
{
struct amdgpu_device *adev = uq_mgr->adev;
struct mes_resume_gang_input queue_input;
struct amdgpu_userq_obj *ctx = &queue->fw_obj;
int r;
if (queue->state == AMDGPU_USERQ_STATE_HUNG)
return -EINVAL;
if (queue->state != AMDGPU_USERQ_STATE_PREEMPTED)
return 0;
memset(&queue_input, 0x0, sizeof(struct mes_resume_gang_input));
queue_input.gang_context_addr = ctx->gpu_addr + AMDGPU_USERQ_PROC_CTX_SZ;
amdgpu_mes_lock(&adev->mes);
r = adev->mes.funcs->resume_gang(&adev->mes, &queue_input);
amdgpu_mes_unlock(&adev->mes);
if (r)
dev_err(adev->dev, "Failed to resume queue, err (%d)\n", r);
return r;
}
const struct amdgpu_userq_funcs userq_mes_funcs = {
.mqd_create = mes_userq_mqd_create,
.mqd_destroy = mes_userq_mqd_destroy,
.unmap = mes_userq_unmap,
.map = mes_userq_map,
.detect_and_reset = mes_userq_detect_and_reset,
.preempt = mes_userq_preempt,
.restore = mes_userq_restore,
};
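For orientation, a hedged sketch of how the generic user-queue manager could exercise the two new hooks wired up above (the real call sites live in the generic user-queue code and are not shown in this hunk):

/* Illustrative only: quiesce a mapped queue around some maintenance work,
 * then resume it through the funcs table above.
 */
static int example_userq_suspend_resume(struct amdgpu_userq_mgr *uq_mgr,
					struct amdgpu_usermode_queue *queue)
{
	const struct amdgpu_userq_funcs *funcs = &userq_mes_funcs;
	int r;

	r = funcs->preempt(uq_mgr, queue); /* MES suspend_gang + fence wait */
	if (r)
		return r;

	/* ... do whatever required the queue to be quiesced ... */

	return funcs->restore(uq_mgr, queue); /* MES resume_gang */
}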

View File

@ -202,6 +202,9 @@ send_request:
case IDH_REQ_RAS_CPER_DUMP:
event = IDH_RAS_CPER_DUMP_READY;
break;
case IDH_REQ_RAS_CHK_CRITI:
event = IDH_REQ_RAS_CHK_CRITI_READY;
break;
default:
break;
}
@ -556,6 +559,16 @@ static int xgpu_nv_req_ras_bad_pages(struct amdgpu_device *adev)
return xgpu_nv_send_access_requests(adev, IDH_REQ_RAS_BAD_PAGES);
}
static int xgpu_nv_check_vf_critical_region(struct amdgpu_device *adev, u64 addr)
{
uint32_t addr_hi, addr_lo;
addr_hi = (uint32_t)(addr >> 32);
addr_lo = (uint32_t)(addr & 0xFFFFFFFF);
return xgpu_nv_send_access_requests_with_param(
adev, IDH_REQ_RAS_CHK_CRITI, addr_hi, addr_lo, 0);
}
const struct amdgpu_virt_ops xgpu_nv_virt_ops = {
.req_full_gpu = xgpu_nv_request_full_gpu_access,
.rel_full_gpu = xgpu_nv_release_full_gpu_access,
@ -569,4 +582,5 @@ const struct amdgpu_virt_ops xgpu_nv_virt_ops = {
.req_ras_err_count = xgpu_nv_req_ras_err_count,
.req_ras_cper_dump = xgpu_nv_req_ras_cper_dump,
.req_bad_pages = xgpu_nv_req_ras_bad_pages,
.req_ras_chk_criti = xgpu_nv_check_vf_critical_region
};

View File

@ -43,6 +43,7 @@ enum idh_request {
IDH_REQ_RAS_ERROR_COUNT = 203,
IDH_REQ_RAS_CPER_DUMP = 204,
IDH_REQ_RAS_BAD_PAGES = 205,
IDH_REQ_RAS_CHK_CRITI = 206
};
enum idh_event {
@ -62,6 +63,7 @@ enum idh_event {
IDH_RAS_BAD_PAGES_READY = 15,
IDH_RAS_BAD_PAGES_NOTIFICATION = 16,
IDH_UNRECOV_ERR_NOTIFICATION = 17,
IDH_REQ_RAS_CHK_CRITI_READY = 18,
IDH_TEXT_MESSAGE = 255,
};

View File

@ -743,7 +743,7 @@ int smu_v11_0_i2c_control_init(struct amdgpu_device *adev)
adev->pm.ras_eeprom_i2c_bus = &adev->pm.smu_i2c[0].adapter;
adev->pm.fru_eeprom_i2c_bus = &adev->pm.smu_i2c[0].adapter;
res = i2c_add_adapter(control);
res = devm_i2c_add_adapter(adev->dev, control);
if (res)
DRM_ERROR("Failed to register hw i2c, err: %d\n", res);
@ -752,9 +752,6 @@ int smu_v11_0_i2c_control_init(struct amdgpu_device *adev)
void smu_v11_0_i2c_control_fini(struct amdgpu_device *adev)
{
struct i2c_adapter *control = adev->pm.ras_eeprom_i2c_bus;
i2c_del_adapter(control);
adev->pm.ras_eeprom_i2c_bus = NULL;
adev->pm.fru_eeprom_i2c_bus = NULL;
}

View File

@ -193,7 +193,7 @@ static int vcn_v1_0_sw_init(struct amdgpu_ip_block *ip_block)
adev->vcn.inst[0].pause_dpg_mode = vcn_v1_0_pause_dpg_mode;
if (amdgpu_vcnfw_log) {
volatile struct amdgpu_fw_shared *fw_shared = adev->vcn.inst->fw_shared.cpu_addr;
struct amdgpu_fw_shared *fw_shared = adev->vcn.inst->fw_shared.cpu_addr;
fw_shared->present_flag_0 = 0;
amdgpu_vcn_fwlog_init(adev->vcn.inst);
@ -230,11 +230,11 @@ static int vcn_v1_0_sw_fini(struct amdgpu_ip_block *ip_block)
jpeg_v1_0_sw_fini(ip_block);
r = amdgpu_vcn_sw_fini(adev, 0);
amdgpu_vcn_sw_fini(adev, 0);
kfree(adev->vcn.ip_dump);
return r;
return 0;
}
/**

View File

@ -137,7 +137,7 @@ static int vcn_v2_0_sw_init(struct amdgpu_ip_block *ip_block)
struct amdgpu_ring *ring;
int i, r;
struct amdgpu_device *adev = ip_block->adev;
volatile struct amdgpu_fw_shared *fw_shared;
struct amdgpu_fw_shared *fw_shared;
/* VCN DEC TRAP */
r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VCN,
@ -252,7 +252,7 @@ static int vcn_v2_0_sw_fini(struct amdgpu_ip_block *ip_block)
{
int r, idx;
struct amdgpu_device *adev = ip_block->adev;
volatile struct amdgpu_fw_shared *fw_shared = adev->vcn.inst->fw_shared.cpu_addr;
struct amdgpu_fw_shared *fw_shared = adev->vcn.inst->fw_shared.cpu_addr;
if (drm_dev_enter(adev_to_drm(adev), &idx)) {
fw_shared->present_flag_0 = 0;
@ -267,9 +267,9 @@ static int vcn_v2_0_sw_fini(struct amdgpu_ip_block *ip_block)
amdgpu_vcn_sysfs_reset_mask_fini(adev);
r = amdgpu_vcn_sw_fini(adev, 0);
amdgpu_vcn_sw_fini(adev, 0);
return r;
return 0;
}
/**
@ -853,7 +853,7 @@ static void vcn_v2_0_enable_static_power_gating(struct amdgpu_vcn_inst *vinst)
static int vcn_v2_0_start_dpg_mode(struct amdgpu_vcn_inst *vinst, bool indirect)
{
struct amdgpu_device *adev = vinst->adev;
volatile struct amdgpu_fw_shared *fw_shared = adev->vcn.inst->fw_shared.cpu_addr;
struct amdgpu_fw_shared *fw_shared = adev->vcn.inst->fw_shared.cpu_addr;
struct amdgpu_ring *ring = &adev->vcn.inst->ring_dec;
uint32_t rb_bufsz, tmp;
int ret;
@ -1001,7 +1001,7 @@ static int vcn_v2_0_start_dpg_mode(struct amdgpu_vcn_inst *vinst, bool indirect)
static int vcn_v2_0_start(struct amdgpu_vcn_inst *vinst)
{
struct amdgpu_device *adev = vinst->adev;
volatile struct amdgpu_fw_shared *fw_shared = adev->vcn.inst->fw_shared.cpu_addr;
struct amdgpu_fw_shared *fw_shared = adev->vcn.inst->fw_shared.cpu_addr;
struct amdgpu_ring *ring = &adev->vcn.inst->ring_dec;
uint32_t rb_bufsz, tmp;
uint32_t lmi_swap_cntl;
@ -1308,7 +1308,7 @@ static int vcn_v2_0_pause_dpg_mode(struct amdgpu_vcn_inst *vinst,
UVD_POWER_STATUS__UVD_POWER_STATUS_MASK);
if (!ret_code) {
volatile struct amdgpu_fw_shared *fw_shared = adev->vcn.inst->fw_shared.cpu_addr;
struct amdgpu_fw_shared *fw_shared = adev->vcn.inst->fw_shared.cpu_addr;
/* pause DPG */
reg_data |= UVD_DPG_PAUSE__NJ_PAUSE_DPG_REQ_MASK;
WREG32_SOC15(UVD, 0, mmUVD_DPG_PAUSE, reg_data);

View File

@ -277,7 +277,7 @@ static int vcn_v2_5_sw_init(struct amdgpu_ip_block *ip_block)
struct amdgpu_device *adev = ip_block->adev;
for (j = 0; j < adev->vcn.num_vcn_inst; j++) {
volatile struct amdgpu_fw_shared *fw_shared;
struct amdgpu_fw_shared *fw_shared;
if (adev->vcn.harvest_config & (1 << j))
continue;
@ -420,7 +420,7 @@ static int vcn_v2_5_sw_fini(struct amdgpu_ip_block *ip_block)
{
int i, r, idx;
struct amdgpu_device *adev = ip_block->adev;
volatile struct amdgpu_fw_shared *fw_shared;
struct amdgpu_fw_shared *fw_shared;
if (drm_dev_enter(adev_to_drm(adev), &idx)) {
for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
@ -442,9 +442,7 @@ static int vcn_v2_5_sw_fini(struct amdgpu_ip_block *ip_block)
r = amdgpu_vcn_suspend(adev, i);
if (r)
return r;
r = amdgpu_vcn_sw_fini(adev, i);
if (r)
return r;
amdgpu_vcn_sw_fini(adev, i);
}
return 0;
@ -1000,7 +998,7 @@ static int vcn_v2_5_start_dpg_mode(struct amdgpu_vcn_inst *vinst, bool indirect)
{
struct amdgpu_device *adev = vinst->adev;
int inst_idx = vinst->inst;
volatile struct amdgpu_fw_shared *fw_shared = adev->vcn.inst[inst_idx].fw_shared.cpu_addr;
struct amdgpu_fw_shared *fw_shared = adev->vcn.inst[inst_idx].fw_shared.cpu_addr;
struct amdgpu_ring *ring;
uint32_t rb_bufsz, tmp;
int ret;
@ -1157,7 +1155,7 @@ static int vcn_v2_5_start(struct amdgpu_vcn_inst *vinst)
{
struct amdgpu_device *adev = vinst->adev;
int i = vinst->inst;
volatile struct amdgpu_fw_shared *fw_shared =
struct amdgpu_fw_shared *fw_shared =
adev->vcn.inst[i].fw_shared.cpu_addr;
struct amdgpu_ring *ring;
uint32_t rb_bufsz, tmp;
@ -1669,7 +1667,7 @@ static int vcn_v2_5_pause_dpg_mode(struct amdgpu_vcn_inst *vinst,
UVD_POWER_STATUS__UVD_POWER_STATUS_MASK);
if (!ret_code) {
volatile struct amdgpu_fw_shared *fw_shared = adev->vcn.inst[inst_idx].fw_shared.cpu_addr;
struct amdgpu_fw_shared *fw_shared = adev->vcn.inst[inst_idx].fw_shared.cpu_addr;
/* pause DPG */
reg_data |= UVD_DPG_PAUSE__NJ_PAUSE_DPG_REQ_MASK;

View File

@ -191,7 +191,7 @@ static int vcn_v3_0_sw_init(struct amdgpu_ip_block *ip_block)
}
for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
volatile struct amdgpu_fw_shared *fw_shared;
struct amdgpu_fw_shared *fw_shared;
if (adev->vcn.harvest_config & (1 << i))
continue;
@ -327,7 +327,7 @@ static int vcn_v3_0_sw_fini(struct amdgpu_ip_block *ip_block)
if (drm_dev_enter(adev_to_drm(adev), &idx)) {
for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
volatile struct amdgpu_fw_shared *fw_shared;
struct amdgpu_fw_shared *fw_shared;
if (adev->vcn.harvest_config & (1 << i))
continue;
@ -349,9 +349,7 @@ static int vcn_v3_0_sw_fini(struct amdgpu_ip_block *ip_block)
if (r)
return r;
r = amdgpu_vcn_sw_fini(adev, i);
if (r)
return r;
amdgpu_vcn_sw_fini(adev, i);
}
return 0;
@ -1031,7 +1029,7 @@ static int vcn_v3_0_start_dpg_mode(struct amdgpu_vcn_inst *vinst, bool indirect)
{
struct amdgpu_device *adev = vinst->adev;
int inst_idx = vinst->inst;
volatile struct amdgpu_fw_shared *fw_shared = adev->vcn.inst[inst_idx].fw_shared.cpu_addr;
struct amdgpu_fw_shared *fw_shared = adev->vcn.inst[inst_idx].fw_shared.cpu_addr;
struct amdgpu_ring *ring;
uint32_t rb_bufsz, tmp;
int ret;
@ -1196,7 +1194,7 @@ static int vcn_v3_0_start(struct amdgpu_vcn_inst *vinst)
{
struct amdgpu_device *adev = vinst->adev;
int i = vinst->inst;
volatile struct amdgpu_fw_shared *fw_shared;
struct amdgpu_fw_shared *fw_shared;
struct amdgpu_ring *ring;
uint32_t rb_bufsz, tmp;
int j, k, r;
@ -1717,7 +1715,7 @@ static int vcn_v3_0_pause_dpg_mode(struct amdgpu_vcn_inst *vinst,
{
struct amdgpu_device *adev = vinst->adev;
int inst_idx = vinst->inst;
volatile struct amdgpu_fw_shared *fw_shared;
struct amdgpu_fw_shared *fw_shared;
struct amdgpu_ring *ring;
uint32_t reg_data = 0;
int ret_code;
@ -1836,7 +1834,7 @@ static uint64_t vcn_v3_0_dec_ring_get_wptr(struct amdgpu_ring *ring)
static void vcn_v3_0_dec_ring_set_wptr(struct amdgpu_ring *ring)
{
struct amdgpu_device *adev = ring->adev;
volatile struct amdgpu_fw_shared *fw_shared;
struct amdgpu_fw_shared *fw_shared;
if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) {
/*whenever update RBC_RB_WPTR, we save the wptr in shared rb.wptr and scratch2 */

View File

@ -148,7 +148,7 @@ static int vcn_v4_0_early_init(struct amdgpu_ip_block *ip_block)
static int vcn_v4_0_fw_shared_init(struct amdgpu_device *adev, int inst_idx)
{
volatile struct amdgpu_vcn4_fw_shared *fw_shared;
struct amdgpu_vcn4_fw_shared *fw_shared;
fw_shared = adev->vcn.inst[inst_idx].fw_shared.cpu_addr;
fw_shared->present_flag_0 = cpu_to_le32(AMDGPU_FW_SHARED_FLAG_0_UNIFIED_QUEUE);
@ -278,7 +278,7 @@ static int vcn_v4_0_sw_fini(struct amdgpu_ip_block *ip_block)
if (drm_dev_enter(adev_to_drm(adev), &idx)) {
for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
volatile struct amdgpu_vcn4_fw_shared *fw_shared;
struct amdgpu_vcn4_fw_shared *fw_shared;
if (adev->vcn.harvest_config & (1 << i))
continue;
@ -302,11 +302,8 @@ static int vcn_v4_0_sw_fini(struct amdgpu_ip_block *ip_block)
amdgpu_vcn_sysfs_reset_mask_fini(adev);
for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
r = amdgpu_vcn_sw_fini(adev, i);
if (r)
return r;
}
for (i = 0; i < adev->vcn.num_vcn_inst; i++)
amdgpu_vcn_sw_fini(adev, i);
return 0;
}
@ -1000,7 +997,7 @@ static int vcn_v4_0_start_dpg_mode(struct amdgpu_vcn_inst *vinst, bool indirect)
{
struct amdgpu_device *adev = vinst->adev;
int inst_idx = vinst->inst;
volatile struct amdgpu_vcn4_fw_shared *fw_shared = adev->vcn.inst[inst_idx].fw_shared.cpu_addr;
struct amdgpu_vcn4_fw_shared *fw_shared = adev->vcn.inst[inst_idx].fw_shared.cpu_addr;
struct amdgpu_ring *ring;
uint32_t tmp;
int ret;
@ -1140,7 +1137,7 @@ static int vcn_v4_0_start(struct amdgpu_vcn_inst *vinst)
{
struct amdgpu_device *adev = vinst->adev;
int i = vinst->inst;
volatile struct amdgpu_vcn4_fw_shared *fw_shared;
struct amdgpu_vcn4_fw_shared *fw_shared;
struct amdgpu_ring *ring;
uint32_t tmp;
int j, k, r;
@ -1357,8 +1354,8 @@ static int vcn_v4_0_start_sriov(struct amdgpu_device *adev)
struct mmsch_v4_0_cmd_end end = { {0} };
struct mmsch_v4_0_init_header header;
volatile struct amdgpu_vcn4_fw_shared *fw_shared;
volatile struct amdgpu_fw_shared_rb_setup *rb_setup;
struct amdgpu_vcn4_fw_shared *fw_shared;
struct amdgpu_fw_shared_rb_setup *rb_setup;
direct_wt.cmd_header.command_type =
MMSCH_COMMAND__DIRECT_REG_WRITE;
@ -1609,7 +1606,7 @@ static int vcn_v4_0_stop(struct amdgpu_vcn_inst *vinst)
{
struct amdgpu_device *adev = vinst->adev;
int i = vinst->inst;
volatile struct amdgpu_vcn4_fw_shared *fw_shared;
struct amdgpu_vcn4_fw_shared *fw_shared;
uint32_t tmp;
int r = 0;
@ -1980,7 +1977,7 @@ static struct amdgpu_ring_funcs vcn_v4_0_unified_ring_vm_funcs = {
.type = AMDGPU_RING_TYPE_VCN_ENC,
.align_mask = 0x3f,
.nop = VCN_ENC_CMD_NO_OP,
.extra_dw = sizeof(struct amdgpu_vcn_rb_metadata),
.extra_bytes = sizeof(struct amdgpu_vcn_rb_metadata),
.get_rptr = vcn_v4_0_unified_ring_get_rptr,
.get_wptr = vcn_v4_0_unified_ring_get_wptr,
.set_wptr = vcn_v4_0_unified_ring_set_wptr,

View File

@ -212,7 +212,11 @@ static int vcn_v4_0_3_sw_init(struct amdgpu_ip_block *ip_block)
ring->vm_hub = AMDGPU_MMHUB0(adev->vcn.inst[i].aid_id);
sprintf(ring->name, "vcn_unified_%d", adev->vcn.inst[i].aid_id);
r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.inst->irq, 0,
/* There are no per-instance irq source IDs on 4.0.3; the IH
 * packets use a separate field to differentiate instances.
*/
r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.inst[0].irq, 0,
AMDGPU_RING_PRIO_DEFAULT,
&adev->vcn.inst[i].sched_score);
if (r)
@ -259,7 +263,7 @@ static int vcn_v4_0_3_sw_fini(struct amdgpu_ip_block *ip_block)
if (drm_dev_enter(&adev->ddev, &idx)) {
for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
volatile struct amdgpu_vcn4_fw_shared *fw_shared;
struct amdgpu_vcn4_fw_shared *fw_shared;
fw_shared = adev->vcn.inst[i].fw_shared.cpu_addr;
fw_shared->present_flag_0 = 0;
@ -279,11 +283,8 @@ static int vcn_v4_0_3_sw_fini(struct amdgpu_ip_block *ip_block)
amdgpu_vcn_sysfs_reset_mask_fini(adev);
for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
r = amdgpu_vcn_sw_fini(adev, i);
if (r)
return r;
}
for (i = 0; i < adev->vcn.num_vcn_inst; i++)
amdgpu_vcn_sw_fini(adev, i);
return 0;
}
@ -844,7 +845,7 @@ static int vcn_v4_0_3_start_dpg_mode(struct amdgpu_vcn_inst *vinst,
{
struct amdgpu_device *adev = vinst->adev;
int inst_idx = vinst->inst;
volatile struct amdgpu_vcn4_fw_shared *fw_shared =
struct amdgpu_vcn4_fw_shared *fw_shared =
adev->vcn.inst[inst_idx].fw_shared.cpu_addr;
struct amdgpu_ring *ring;
int vcn_inst, ret;
@ -1011,8 +1012,8 @@ static int vcn_v4_0_3_start_sriov(struct amdgpu_device *adev)
struct mmsch_v4_0_cmd_end end = { {0} };
struct mmsch_v4_0_3_init_header header;
volatile struct amdgpu_vcn4_fw_shared *fw_shared;
volatile struct amdgpu_fw_shared_rb_setup *rb_setup;
struct amdgpu_vcn4_fw_shared *fw_shared;
struct amdgpu_fw_shared_rb_setup *rb_setup;
direct_wt.cmd_header.command_type =
MMSCH_COMMAND__DIRECT_REG_WRITE;
@ -1186,7 +1187,7 @@ static int vcn_v4_0_3_start(struct amdgpu_vcn_inst *vinst)
{
struct amdgpu_device *adev = vinst->adev;
int i = vinst->inst;
volatile struct amdgpu_vcn4_fw_shared *fw_shared;
struct amdgpu_vcn4_fw_shared *fw_shared;
struct amdgpu_ring *ring;
int j, k, r, vcn_inst;
uint32_t tmp;
@ -1396,7 +1397,7 @@ static int vcn_v4_0_3_stop(struct amdgpu_vcn_inst *vinst)
{
struct amdgpu_device *adev = vinst->adev;
int i = vinst->inst;
volatile struct amdgpu_vcn4_fw_shared *fw_shared;
struct amdgpu_vcn4_fw_shared *fw_shared;
int r = 0, vcn_inst;
uint32_t tmp;

View File

@ -149,7 +149,7 @@ static int vcn_v4_0_5_sw_init(struct amdgpu_ip_block *ip_block)
int i, r;
for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
volatile struct amdgpu_vcn4_fw_shared *fw_shared;
struct amdgpu_vcn4_fw_shared *fw_shared;
if (adev->vcn.harvest_config & (1 << i))
continue;
@ -249,7 +249,7 @@ static int vcn_v4_0_5_sw_fini(struct amdgpu_ip_block *ip_block)
if (drm_dev_enter(adev_to_drm(adev), &idx)) {
for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
volatile struct amdgpu_vcn4_fw_shared *fw_shared;
struct amdgpu_vcn4_fw_shared *fw_shared;
if (adev->vcn.harvest_config & (1 << i))
continue;
@ -270,9 +270,7 @@ static int vcn_v4_0_5_sw_fini(struct amdgpu_ip_block *ip_block)
if (r)
return r;
r = amdgpu_vcn_sw_fini(adev, i);
if (r)
return r;
amdgpu_vcn_sw_fini(adev, i);
}
return 0;
@ -912,7 +910,7 @@ static int vcn_v4_0_5_start_dpg_mode(struct amdgpu_vcn_inst *vinst,
{
struct amdgpu_device *adev = vinst->adev;
int inst_idx = vinst->inst;
volatile struct amdgpu_vcn4_fw_shared *fw_shared = adev->vcn.inst[inst_idx].fw_shared.cpu_addr;
struct amdgpu_vcn4_fw_shared *fw_shared = adev->vcn.inst[inst_idx].fw_shared.cpu_addr;
struct amdgpu_ring *ring;
uint32_t tmp;
int ret;
@ -1049,7 +1047,7 @@ static int vcn_v4_0_5_start(struct amdgpu_vcn_inst *vinst)
{
struct amdgpu_device *adev = vinst->adev;
int i = vinst->inst;
volatile struct amdgpu_vcn4_fw_shared *fw_shared;
struct amdgpu_vcn4_fw_shared *fw_shared;
struct amdgpu_ring *ring;
uint32_t tmp;
int j, k, r;
@ -1268,7 +1266,7 @@ static int vcn_v4_0_5_stop(struct amdgpu_vcn_inst *vinst)
{
struct amdgpu_device *adev = vinst->adev;
int i = vinst->inst;
volatile struct amdgpu_vcn4_fw_shared *fw_shared;
struct amdgpu_vcn4_fw_shared *fw_shared;
uint32_t tmp;
int r = 0;

View File

@ -129,7 +129,7 @@ static int vcn_v5_0_0_sw_init(struct amdgpu_ip_block *ip_block)
int i, r;
for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
volatile struct amdgpu_vcn5_fw_shared *fw_shared;
struct amdgpu_vcn5_fw_shared *fw_shared;
if (adev->vcn.harvest_config & (1 << i))
continue;
@ -211,7 +211,7 @@ static int vcn_v5_0_0_sw_fini(struct amdgpu_ip_block *ip_block)
if (drm_dev_enter(adev_to_drm(adev), &idx)) {
for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
volatile struct amdgpu_vcn5_fw_shared *fw_shared;
struct amdgpu_vcn5_fw_shared *fw_shared;
if (adev->vcn.harvest_config & (1 << i))
continue;
@ -232,11 +232,8 @@ static int vcn_v5_0_0_sw_fini(struct amdgpu_ip_block *ip_block)
amdgpu_vcn_sysfs_reset_mask_fini(adev);
for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
r = amdgpu_vcn_sw_fini(adev, i);
if (r)
return r;
}
for (i = 0; i < adev->vcn.num_vcn_inst; i++)
amdgpu_vcn_sw_fini(adev, i);
return 0;
}
@ -695,7 +692,7 @@ static int vcn_v5_0_0_start_dpg_mode(struct amdgpu_vcn_inst *vinst,
{
struct amdgpu_device *adev = vinst->adev;
int inst_idx = vinst->inst;
volatile struct amdgpu_vcn5_fw_shared *fw_shared = adev->vcn.inst[inst_idx].fw_shared.cpu_addr;
struct amdgpu_vcn5_fw_shared *fw_shared = adev->vcn.inst[inst_idx].fw_shared.cpu_addr;
struct amdgpu_ring *ring;
uint32_t tmp;
int ret;
@ -805,7 +802,7 @@ static int vcn_v5_0_0_start(struct amdgpu_vcn_inst *vinst)
{
struct amdgpu_device *adev = vinst->adev;
int i = vinst->inst;
volatile struct amdgpu_vcn5_fw_shared *fw_shared;
struct amdgpu_vcn5_fw_shared *fw_shared;
struct amdgpu_ring *ring;
uint32_t tmp;
int j, k, r;
@ -998,7 +995,7 @@ static int vcn_v5_0_0_stop(struct amdgpu_vcn_inst *vinst)
{
struct amdgpu_device *adev = vinst->adev;
int i = vinst->inst;
volatile struct amdgpu_vcn5_fw_shared *fw_shared;
struct amdgpu_vcn5_fw_shared *fw_shared;
uint32_t tmp;
int r = 0;

View File

@ -226,7 +226,7 @@ static int vcn_v5_0_1_sw_fini(struct amdgpu_ip_block *ip_block)
if (drm_dev_enter(adev_to_drm(adev), &idx)) {
for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
volatile struct amdgpu_vcn5_fw_shared *fw_shared;
struct amdgpu_vcn5_fw_shared *fw_shared;
fw_shared = adev->vcn.inst[i].fw_shared.cpu_addr;
fw_shared->present_flag_0 = 0;
@ -245,14 +245,11 @@ static int vcn_v5_0_1_sw_fini(struct amdgpu_ip_block *ip_block)
return r;
}
for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
r = amdgpu_vcn_sw_fini(adev, i);
if (r)
return r;
}
amdgpu_vcn_sysfs_reset_mask_fini(adev);
for (i = 0; i < adev->vcn.num_vcn_inst; i++)
amdgpu_vcn_sw_fini(adev, i);
return 0;
}
@ -643,7 +640,7 @@ static int vcn_v5_0_1_start_dpg_mode(struct amdgpu_vcn_inst *vinst,
{
struct amdgpu_device *adev = vinst->adev;
int inst_idx = vinst->inst;
volatile struct amdgpu_vcn5_fw_shared *fw_shared =
struct amdgpu_vcn5_fw_shared *fw_shared =
adev->vcn.inst[inst_idx].fw_shared.cpu_addr;
struct amdgpu_ring *ring;
struct dpg_pause_state state = {.fw_based = VCN_DPG_STATE__PAUSE};
@ -779,8 +776,8 @@ static int vcn_v5_0_1_start_sriov(struct amdgpu_device *adev)
struct mmsch_v5_0_cmd_end end = { {0} };
struct mmsch_v5_0_init_header header;
volatile struct amdgpu_vcn5_fw_shared *fw_shared;
volatile struct amdgpu_fw_shared_rb_setup *rb_setup;
struct amdgpu_vcn5_fw_shared *fw_shared;
struct amdgpu_fw_shared_rb_setup *rb_setup;
direct_wt.cmd_header.command_type =
MMSCH_COMMAND__DIRECT_REG_WRITE;
@ -954,7 +951,7 @@ static int vcn_v5_0_1_start(struct amdgpu_vcn_inst *vinst)
{
struct amdgpu_device *adev = vinst->adev;
int i = vinst->inst;
volatile struct amdgpu_vcn5_fw_shared *fw_shared;
struct amdgpu_vcn5_fw_shared *fw_shared;
struct amdgpu_ring *ring;
uint32_t tmp;
int j, k, r, vcn_inst;
@ -1146,7 +1143,7 @@ static int vcn_v5_0_1_stop(struct amdgpu_vcn_inst *vinst)
{
struct amdgpu_device *adev = vinst->adev;
int i = vinst->inst;
volatile struct amdgpu_vcn5_fw_shared *fw_shared;
struct amdgpu_vcn5_fw_shared *fw_shared;
uint32_t tmp;
int r = 0, vcn_inst;

View File

@ -67,7 +67,6 @@
#include "sdma_v2_4.h"
#include "sdma_v3_0.h"
#include "dce_v10_0.h"
#include "dce_v11_0.h"
#include "iceland_ih.h"
#include "tonga_ih.h"
#include "cz_ih.h"
@ -2124,8 +2123,6 @@ int vi_set_ip_blocks(struct amdgpu_device *adev)
else if (amdgpu_device_has_dc_support(adev))
amdgpu_device_ip_block_add(adev, &dm_ip_block);
#endif
else
amdgpu_device_ip_block_add(adev, &dce_v11_2_ip_block);
amdgpu_device_ip_block_add(adev, &uvd_v6_3_ip_block);
amdgpu_device_ip_block_add(adev, &vce_v3_4_ip_block);
break;
@ -2142,8 +2139,6 @@ int vi_set_ip_blocks(struct amdgpu_device *adev)
else if (amdgpu_device_has_dc_support(adev))
amdgpu_device_ip_block_add(adev, &dm_ip_block);
#endif
else
amdgpu_device_ip_block_add(adev, &dce_v11_0_ip_block);
amdgpu_device_ip_block_add(adev, &uvd_v6_0_ip_block);
amdgpu_device_ip_block_add(adev, &vce_v3_1_ip_block);
#if defined(CONFIG_DRM_AMD_ACP)
@ -2163,8 +2158,6 @@ int vi_set_ip_blocks(struct amdgpu_device *adev)
else if (amdgpu_device_has_dc_support(adev))
amdgpu_device_ip_block_add(adev, &dm_ip_block);
#endif
else
amdgpu_device_ip_block_add(adev, &dce_v11_0_ip_block);
amdgpu_device_ip_block_add(adev, &uvd_v6_2_ip_block);
amdgpu_device_ip_block_add(adev, &vce_v3_4_ip_block);
#if defined(CONFIG_DRM_AMD_ACP)

View File

@ -521,15 +521,10 @@ static int kfd_ioctl_set_cu_mask(struct file *filp, struct kfd_process *p,
cu_mask_size = sizeof(uint32_t) * (max_num_cus/32);
}
minfo.cu_mask.ptr = kzalloc(cu_mask_size, GFP_KERNEL);
if (!minfo.cu_mask.ptr)
return -ENOMEM;
retval = copy_from_user(minfo.cu_mask.ptr, cu_mask_ptr, cu_mask_size);
if (retval) {
minfo.cu_mask.ptr = memdup_user(cu_mask_ptr, cu_mask_size);
if (IS_ERR(minfo.cu_mask.ptr)) {
pr_debug("Could not copy CU mask from userspace");
retval = -EFAULT;
goto out;
return PTR_ERR(minfo.cu_mask.ptr);
}
mutex_lock(&p->mutex);
@ -538,7 +533,6 @@ static int kfd_ioctl_set_cu_mask(struct file *filp, struct kfd_process *p,
mutex_unlock(&p->mutex);
out:
kfree(minfo.cu_mask.ptr);
return retval;
}
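
The kfd_ioctl_set_cu_mask() hunk above replaces the open-coded kzalloc() + copy_from_user() pair with memdup_user(), which allocates and copies in one step and reports failure through ERR_PTR(). A minimal sketch of the same idiom, assuming a hypothetical example_copy_mask() helper rather than anything in amdkfd:

#include <linux/err.h>
#include <linux/slab.h>
#include <linux/string.h>	/* memdup_user() */
#include <linux/uaccess.h>

/* Copy 'size' bytes from user space into a new kernel buffer.  memdup_user()
 * returns an ERR_PTR() (-ENOMEM or -EFAULT) on failure, so no separate
 * allocation check or error label is needed.
 */
static int example_copy_mask(const void __user *uptr, size_t size)
{
	void *mask = memdup_user(uptr, size);

	if (IS_ERR(mask))
		return PTR_ERR(mask);

	/* ... hand 'mask' to whatever consumes it ... */

	kfree(mask);
	return 0;
}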

View File

@ -1550,6 +1550,25 @@ int kgd2kfd_start_sched(struct kfd_dev *kfd, uint32_t node_id)
return ret;
}
int kgd2kfd_start_sched_all_nodes(struct kfd_dev *kfd)
{
struct kfd_node *node;
int i, r;
if (!kfd->init_complete)
return 0;
for (i = 0; i < kfd->num_nodes; i++) {
node = kfd->nodes[i];
r = node->dqm->ops.unhalt(node->dqm);
if (r) {
dev_err(kfd_device, "Error in starting scheduler\n");
return r;
}
}
return 0;
}
int kgd2kfd_stop_sched(struct kfd_dev *kfd, uint32_t node_id)
{
struct kfd_node *node;
@ -1567,6 +1586,23 @@ int kgd2kfd_stop_sched(struct kfd_dev *kfd, uint32_t node_id)
return node->dqm->ops.halt(node->dqm);
}
int kgd2kfd_stop_sched_all_nodes(struct kfd_dev *kfd)
{
struct kfd_node *node;
int i, r;
if (!kfd->init_complete)
return 0;
for (i = 0; i < kfd->num_nodes; i++) {
node = kfd->nodes[i];
r = node->dqm->ops.halt(node->dqm);
if (r)
return r;
}
return 0;
}
bool kgd2kfd_compute_active(struct kfd_dev *kfd, uint32_t node_id)
{
struct kfd_node *node;

View File

@ -39,22 +39,22 @@
#endif
#define dev_fmt(fmt) "kfd_migrate: " fmt
static uint64_t
svm_migrate_direct_mapping_addr(struct amdgpu_device *adev, uint64_t addr)
static u64
svm_migrate_direct_mapping_addr(struct amdgpu_device *adev, u64 addr)
{
return addr + amdgpu_ttm_domain_start(adev, TTM_PL_VRAM);
}
static int
svm_migrate_gart_map(struct amdgpu_ring *ring, uint64_t npages,
dma_addr_t *addr, uint64_t *gart_addr, uint64_t flags)
svm_migrate_gart_map(struct amdgpu_ring *ring, u64 npages,
dma_addr_t *addr, u64 *gart_addr, u64 flags)
{
struct amdgpu_device *adev = ring->adev;
struct amdgpu_job *job;
unsigned int num_dw, num_bytes;
struct dma_fence *fence;
uint64_t src_addr, dst_addr;
uint64_t pte_flags;
u64 src_addr, dst_addr;
u64 pte_flags;
void *cpu_addr;
int r;
@ -123,15 +123,15 @@ svm_migrate_gart_map(struct amdgpu_ring *ring, uint64_t npages,
static int
svm_migrate_copy_memory_gart(struct amdgpu_device *adev, dma_addr_t *sys,
uint64_t *vram, uint64_t npages,
u64 *vram, u64 npages,
enum MIGRATION_COPY_DIR direction,
struct dma_fence **mfence)
{
const uint64_t GTT_MAX_PAGES = AMDGPU_GTT_MAX_TRANSFER_SIZE;
const u64 GTT_MAX_PAGES = AMDGPU_GTT_MAX_TRANSFER_SIZE;
struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring;
uint64_t gart_s, gart_d;
u64 gart_s, gart_d;
struct dma_fence *next;
uint64_t size;
u64 size;
int r;
mutex_lock(&adev->mman.gtt_window_lock);
@ -261,30 +261,42 @@ static void svm_migrate_put_sys_page(unsigned long addr)
put_page(page);
}
static long
static unsigned long svm_migrate_successful_pages(struct migrate_vma *migrate)
{
unsigned long mpages = 0;
unsigned long i;
for (i = 0; i < migrate->npages; i++) {
if (migrate->dst[i] & MIGRATE_PFN_VALID &&
migrate->src[i] & MIGRATE_PFN_MIGRATE)
mpages++;
}
return mpages;
}
static int
svm_migrate_copy_to_vram(struct kfd_node *node, struct svm_range *prange,
struct migrate_vma *migrate, struct dma_fence **mfence,
dma_addr_t *scratch, uint64_t ttm_res_offset)
dma_addr_t *scratch, u64 ttm_res_offset)
{
uint64_t npages = migrate->npages;
u64 npages = migrate->npages;
struct amdgpu_device *adev = node->adev;
struct device *dev = adev->dev;
struct amdgpu_res_cursor cursor;
long mpages;
u64 mpages = 0;
dma_addr_t *src;
uint64_t *dst;
uint64_t i, j;
u64 *dst;
u64 i, j;
int r;
pr_debug("svms 0x%p [0x%lx 0x%lx 0x%llx]\n", prange->svms, prange->start,
prange->last, ttm_res_offset);
src = scratch;
dst = (uint64_t *)(scratch + npages);
dst = (u64 *)(scratch + npages);
amdgpu_res_first(prange->ttm_res, ttm_res_offset,
npages << PAGE_SHIFT, &cursor);
mpages = 0;
for (i = j = 0; (i < npages) && (mpages < migrate->cpages); i++) {
struct page *spage;
@ -345,14 +357,13 @@ svm_migrate_copy_to_vram(struct kfd_node *node, struct svm_range *prange,
out_free_vram_pages:
if (r) {
pr_debug("failed %d to copy memory to vram\n", r);
while (i-- && mpages) {
for (i = 0; i < npages && mpages; i++) {
if (!dst[i])
continue;
svm_migrate_put_vram_page(adev, dst[i]);
migrate->dst[i] = 0;
mpages--;
}
mpages = r;
}
#ifdef DEBUG_FORCE_MIXED_DOMAINS
@ -370,22 +381,22 @@ out_free_vram_pages:
}
#endif
return mpages;
return r;
}
static long
svm_migrate_vma_to_vram(struct kfd_node *node, struct svm_range *prange,
struct vm_area_struct *vma, uint64_t start,
uint64_t end, uint32_t trigger, uint64_t ttm_res_offset)
struct vm_area_struct *vma, u64 start,
u64 end, uint32_t trigger, u64 ttm_res_offset)
{
struct kfd_process *p = container_of(prange->svms, struct kfd_process, svms);
uint64_t npages = (end - start) >> PAGE_SHIFT;
u64 npages = (end - start) >> PAGE_SHIFT;
struct amdgpu_device *adev = node->adev;
struct kfd_process_device *pdd;
struct dma_fence *mfence = NULL;
struct migrate_vma migrate = { 0 };
unsigned long cpages = 0;
long mpages = 0;
unsigned long mpages = 0;
dma_addr_t *scratch;
void *buf;
int r = -ENOMEM;
@ -398,7 +409,7 @@ svm_migrate_vma_to_vram(struct kfd_node *node, struct svm_range *prange,
migrate.pgmap_owner = SVM_ADEV_PGMAP_OWNER(adev);
buf = kvcalloc(npages,
2 * sizeof(*migrate.src) + sizeof(uint64_t) + sizeof(dma_addr_t),
2 * sizeof(*migrate.src) + sizeof(u64) + sizeof(dma_addr_t),
GFP_KERNEL);
if (!buf)
goto out;
@ -431,17 +442,15 @@ svm_migrate_vma_to_vram(struct kfd_node *node, struct svm_range *prange,
else
pr_debug("0x%lx pages collected\n", cpages);
mpages = svm_migrate_copy_to_vram(node, prange, &migrate, &mfence, scratch, ttm_res_offset);
r = svm_migrate_copy_to_vram(node, prange, &migrate, &mfence, scratch, ttm_res_offset);
migrate_vma_pages(&migrate);
svm_migrate_copy_done(adev, mfence);
migrate_vma_finalize(&migrate);
if (mpages >= 0)
pr_debug("migrated/collected/requested 0x%lx/0x%lx/0x%lx\n",
mpages, cpages, migrate.npages);
else
r = mpages;
mpages = svm_migrate_successful_pages(&migrate);
pr_debug("migrated/collected/requested 0x%lx/0x%lx/0x%lx\n",
mpages, cpages, migrate.npages);
svm_range_dma_unmap_dev(adev->dev, scratch, 0, npages);
@ -451,13 +460,14 @@ out_free:
start >> PAGE_SHIFT, end >> PAGE_SHIFT,
0, node->id, trigger, r);
out:
if (!r && mpages > 0) {
if (!r && mpages) {
pdd = svm_range_get_pdd_by_node(prange, node);
if (pdd)
WRITE_ONCE(pdd->page_in, pdd->page_in + mpages);
}
return r ? r : mpages;
return mpages;
}
return r;
}
/**
@ -481,7 +491,7 @@ svm_migrate_ram_to_vram(struct svm_range *prange, uint32_t best_loc,
{
unsigned long addr, start, end;
struct vm_area_struct *vma;
uint64_t ttm_res_offset;
u64 ttm_res_offset;
struct kfd_node *node;
unsigned long mpages = 0;
long r = 0;
@ -568,18 +578,17 @@ static void svm_migrate_page_free(struct page *page)
}
}
static long
static int
svm_migrate_copy_to_ram(struct amdgpu_device *adev, struct svm_range *prange,
struct migrate_vma *migrate, struct dma_fence **mfence,
dma_addr_t *scratch, uint64_t npages)
dma_addr_t *scratch, u64 npages)
{
struct device *dev = adev->dev;
uint64_t *src;
u64 *src;
dma_addr_t *dst;
struct page *dpage;
long mpages;
uint64_t i = 0, j;
uint64_t addr;
u64 i = 0, j;
u64 addr;
int r = 0;
pr_debug("svms 0x%p [0x%lx 0x%lx]\n", prange->svms, prange->start,
@ -587,10 +596,9 @@ svm_migrate_copy_to_ram(struct amdgpu_device *adev, struct svm_range *prange,
addr = migrate->start;
src = (uint64_t *)(scratch + npages);
src = (u64 *)(scratch + npages);
dst = scratch;
mpages = 0;
for (i = 0, j = 0; i < npages; i++, addr += PAGE_SIZE) {
struct page *spage;
@ -639,7 +647,6 @@ svm_migrate_copy_to_ram(struct amdgpu_device *adev, struct svm_range *prange,
dst[i] >> PAGE_SHIFT, page_to_pfn(dpage));
migrate->dst[i] = migrate_pfn(page_to_pfn(dpage));
mpages++;
j++;
}
@ -649,17 +656,13 @@ svm_migrate_copy_to_ram(struct amdgpu_device *adev, struct svm_range *prange,
out_oom:
if (r) {
pr_debug("failed %d copy to ram\n", r);
while (i-- && mpages) {
if (!migrate->dst[i])
continue;
while (i--) {
svm_migrate_put_sys_page(dst[i]);
migrate->dst[i] = 0;
mpages--;
}
mpages = r;
}
return mpages;
return r;
}
/**
@ -681,13 +684,13 @@ out_oom:
*/
static long
svm_migrate_vma_to_ram(struct kfd_node *node, struct svm_range *prange,
struct vm_area_struct *vma, uint64_t start, uint64_t end,
struct vm_area_struct *vma, u64 start, u64 end,
uint32_t trigger, struct page *fault_page)
{
struct kfd_process *p = container_of(prange->svms, struct kfd_process, svms);
uint64_t npages = (end - start) >> PAGE_SHIFT;
u64 npages = (end - start) >> PAGE_SHIFT;
unsigned long cpages = 0;
long mpages = 0;
unsigned long mpages = 0;
struct amdgpu_device *adev = node->adev;
struct kfd_process_device *pdd;
struct dma_fence *mfence = NULL;
@ -707,7 +710,7 @@ svm_migrate_vma_to_ram(struct kfd_node *node, struct svm_range *prange,
migrate.flags = MIGRATE_VMA_SELECT_DEVICE_PRIVATE;
buf = kvcalloc(npages,
2 * sizeof(*migrate.src) + sizeof(uint64_t) + sizeof(dma_addr_t),
2 * sizeof(*migrate.src) + sizeof(u64) + sizeof(dma_addr_t),
GFP_KERNEL);
if (!buf)
goto out;
@ -741,15 +744,13 @@ svm_migrate_vma_to_ram(struct kfd_node *node, struct svm_range *prange,
else
pr_debug("0x%lx pages collected\n", cpages);
mpages = svm_migrate_copy_to_ram(adev, prange, &migrate, &mfence,
r = svm_migrate_copy_to_ram(adev, prange, &migrate, &mfence,
scratch, npages);
migrate_vma_pages(&migrate);
if (mpages >= 0)
pr_debug("migrated/collected/requested 0x%lx/0x%lx/0x%lx\n",
mpages = svm_migrate_successful_pages(&migrate);
pr_debug("migrated/collected/requested 0x%lx/0x%lx/0x%lx\n",
mpages, cpages, migrate.npages);
else
r = mpages;
svm_migrate_copy_done(adev, mfence);
migrate_vma_finalize(&migrate);
@ -762,7 +763,7 @@ out_free:
start >> PAGE_SHIFT, end >> PAGE_SHIFT,
node->id, 0, trigger, r);
out:
if (!r && mpages > 0) {
if (!r && mpages) {
pdd = svm_range_get_pdd_by_node(prange, node);
if (pdd)
WRITE_ONCE(pdd->page_out, pdd->page_out + mpages);
@ -846,8 +847,8 @@ int svm_migrate_vram_to_ram(struct svm_range *prange, struct mm_struct *mm,
if (r >= 0) {
WARN_ONCE(prange->vram_pages < mpages,
"Recorded vram pages(0x%llx) should not be less than migration pages(0x%lx).",
prange->vram_pages, mpages);
"Recorded vram pages(0x%llx) should not be less than migration pages(0x%lx).",
prange->vram_pages, mpages);
prange->vram_pages -= mpages;
/* prange does not have vram page set its actual_loc to system

View File

@ -2081,6 +2081,8 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
dc_hardware_init(adev->dm.dc);
adev->dm.restore_backlight = true;
adev->dm.hpd_rx_offload_wq = hpd_rx_irq_create_workqueue(adev);
if (!adev->dm.hpd_rx_offload_wq) {
drm_err(adev_to_drm(adev), "failed to create hpd rx offload workqueue.\n");
@ -2945,7 +2947,7 @@ static int dm_oem_i2c_hw_init(struct amdgpu_device *adev)
return -ENOMEM;
}
r = i2c_add_adapter(&oem_i2c->base);
r = devm_i2c_add_adapter(adev->dev, &oem_i2c->base);
if (r) {
drm_info(adev_to_drm(adev), "Failed to register oem i2c\n");
kfree(oem_i2c);
@ -2957,17 +2959,6 @@ static int dm_oem_i2c_hw_init(struct amdgpu_device *adev)
return 0;
}
static void dm_oem_i2c_hw_fini(struct amdgpu_device *adev)
{
struct amdgpu_display_manager *dm = &adev->dm;
if (dm->oem_i2c) {
i2c_del_adapter(&dm->oem_i2c->base);
kfree(dm->oem_i2c);
dm->oem_i2c = NULL;
}
}
/**
* dm_hw_init() - Initialize DC device
* @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
@ -3018,8 +3009,6 @@ static int dm_hw_fini(struct amdgpu_ip_block *ip_block)
{
struct amdgpu_device *adev = ip_block->adev;
dm_oem_i2c_hw_fini(adev);
amdgpu_dm_hpd_fini(adev);
amdgpu_dm_irq_fini(adev);
@ -3047,14 +3036,20 @@ static void dm_gpureset_toggle_interrupts(struct amdgpu_device *adev,
drm_warn(adev_to_drm(adev), "Failed to %s pflip interrupts\n",
enable ? "enable" : "disable");
if (enable) {
if (amdgpu_dm_crtc_vrr_active(to_dm_crtc_state(acrtc->base.state)))
rc = amdgpu_dm_crtc_set_vupdate_irq(&acrtc->base, true);
} else
rc = amdgpu_dm_crtc_set_vupdate_irq(&acrtc->base, false);
if (dc_supports_vrr(adev->dm.dc->ctx->dce_version)) {
if (enable) {
if (amdgpu_dm_crtc_vrr_active(
to_dm_crtc_state(acrtc->base.state)))
rc = amdgpu_dm_crtc_set_vupdate_irq(
&acrtc->base, true);
} else
rc = amdgpu_dm_crtc_set_vupdate_irq(
&acrtc->base, false);
if (rc)
drm_warn(adev_to_drm(adev), "Failed to %sable vupdate interrupt\n", enable ? "en" : "dis");
if (rc)
drm_warn(adev_to_drm(adev), "Failed to %sable vupdate interrupt\n",
enable ? "en" : "dis");
}
irq_source = IRQ_TYPE_VBLANK + acrtc->otg_inst;
/* During gpu-reset we disable and then enable vblank irq, so
@ -3443,6 +3438,7 @@ static int dm_resume(struct amdgpu_ip_block *ip_block)
dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);
dc_resume(dm->dc);
adev->dm.restore_backlight = true;
amdgpu_dm_irq_resume_early(adev);
@ -6427,6 +6423,10 @@ static void fill_stream_properties_from_drm_display_mode(
&& aconnector
&& aconnector->force_yuv420_output)
timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
else if ((connector->display_info.color_formats & DRM_COLOR_FORMAT_YCBCR422)
&& aconnector
&& aconnector->force_yuv422_output)
timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR422;
else if ((connector->display_info.color_formats & DRM_COLOR_FORMAT_YCBCR444)
&& stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR444;
@ -7384,10 +7384,6 @@ static void amdgpu_dm_connector_destroy(struct drm_connector *connector)
drm_dp_cec_unregister_connector(&aconnector->dm_dp_aux.aux);
drm_connector_unregister(connector);
drm_connector_cleanup(connector);
if (aconnector->i2c) {
i2c_del_adapter(&aconnector->i2c->base);
kfree(aconnector->i2c);
}
kfree(aconnector->dm_dp_aux.aux.name);
kfree(connector);
@ -7687,6 +7683,7 @@ create_validate_stream_for_sink(struct drm_connector *connector,
bpc_limit = 8;
do {
drm_dbg_kms(connector->dev, "Trying with %d bpc\n", requested_bpc);
stream = create_stream_for_sink(connector, drm_mode,
dm_state, old_stream,
requested_bpc);
@ -7722,16 +7719,41 @@ create_validate_stream_for_sink(struct drm_connector *connector,
} while (stream == NULL && requested_bpc >= bpc_limit);
if ((dc_result == DC_FAIL_ENC_VALIDATE ||
dc_result == DC_EXCEED_DONGLE_CAP) &&
!aconnector->force_yuv420_output) {
DRM_DEBUG_KMS("%s:%d Retry forcing yuv420 encoding\n",
__func__, __LINE__);
aconnector->force_yuv420_output = true;
switch (dc_result) {
/*
* If we failed to validate DP bandwidth stream with the requested RGB color depth,
* we try to fallback and configure in order:
* YUV422 (8bpc, 6bpc)
* YUV420 (8bpc, 6bpc)
*/
case DC_FAIL_ENC_VALIDATE:
case DC_EXCEED_DONGLE_CAP:
case DC_NO_DP_LINK_BANDWIDTH:
/* recursively entered twice and already tried both YUV422 and YUV420 */
if (aconnector->force_yuv422_output && aconnector->force_yuv420_output)
break;
/* first failure; try YUV422 */
if (!aconnector->force_yuv422_output) {
drm_dbg_kms(connector->dev, "%s:%d Validation failed with %d, retrying w/ YUV422\n",
__func__, __LINE__, dc_result);
aconnector->force_yuv422_output = true;
/* recursively entered and YUV422 failed, try YUV420 */
} else if (!aconnector->force_yuv420_output) {
drm_dbg_kms(connector->dev, "%s:%d Validation failed with %d, retrying w/ YUV420\n",
__func__, __LINE__, dc_result);
aconnector->force_yuv420_output = true;
}
stream = create_validate_stream_for_sink(connector, drm_mode,
dm_state, old_stream);
dm_state, old_stream);
aconnector->force_yuv422_output = false;
aconnector->force_yuv420_output = false;
break;
case DC_OK:
break;
default:
drm_dbg_kms(connector->dev, "%s:%d Unhandled validation failure %d\n",
__func__, __LINE__, dc_result);
break;
}
return stream;
@ -8719,7 +8741,7 @@ static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
}
aconnector->i2c = i2c;
res = i2c_add_adapter(&i2c->base);
res = devm_i2c_add_adapter(dm->adev->dev, &i2c->base);
if (res) {
drm_err(adev_to_drm(dm->adev), "Failed to register hw i2c %d\n", link->link_index);
@ -8817,7 +8839,16 @@ static int amdgpu_dm_encoder_init(struct drm_device *dev,
static void manage_dm_interrupts(struct amdgpu_device *adev,
struct amdgpu_crtc *acrtc,
struct dm_crtc_state *acrtc_state)
{
{
/*
* We cannot be sure that the frontend index maps to the same
* backend index - some even map to more than one.
* So we have to go through the CRTC to find the right IRQ.
*/
int irq_type = amdgpu_display_crtc_idx_to_irq_type(
adev,
acrtc->crtc_id);
struct drm_device *dev = adev_to_drm(adev);
struct drm_vblank_crtc_config config = {0};
struct dc_crtc_timing *timing;
int offdelay;
@ -8870,7 +8901,35 @@ static void manage_dm_interrupts(struct amdgpu_device *adev,
drm_crtc_vblank_on_config(&acrtc->base,
&config);
/* Allow RX6xxx, RX7700, RX7800 GPUs to call amdgpu_irq_get.*/
switch (amdgpu_ip_version(adev, DCE_HWIP, 0)) {
case IP_VERSION(3, 0, 0):
case IP_VERSION(3, 0, 2):
case IP_VERSION(3, 0, 3):
case IP_VERSION(3, 2, 0):
if (amdgpu_irq_get(adev, &adev->pageflip_irq, irq_type))
drm_err(dev, "DM_IRQ: Cannot get pageflip irq!\n");
#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
if (amdgpu_irq_get(adev, &adev->vline0_irq, irq_type))
drm_err(dev, "DM_IRQ: Cannot get vline0 irq!\n");
#endif
}
} else {
/* Allow RX6xxx, RX7700, RX7800 GPUs to call amdgpu_irq_put.*/
switch (amdgpu_ip_version(adev, DCE_HWIP, 0)) {
case IP_VERSION(3, 0, 0):
case IP_VERSION(3, 0, 2):
case IP_VERSION(3, 0, 3):
case IP_VERSION(3, 2, 0):
#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
if (amdgpu_irq_put(adev, &adev->vline0_irq, irq_type))
drm_err(dev, "DM_IRQ: Cannot put vline0 irq!\n");
#endif
if (amdgpu_irq_put(adev, &adev->pageflip_irq, irq_type))
drm_err(dev, "DM_IRQ: Cannot put pageflip irq!\n");
}
drm_crtc_vblank_off(&acrtc->base);
}
}
@ -9892,7 +9951,6 @@ static void amdgpu_dm_commit_streams(struct drm_atomic_state *state,
bool mode_set_reset_required = false;
u32 i;
struct dc_commit_streams_params params = {dc_state->streams, dc_state->stream_count};
bool set_backlight_level = false;
/* Disable writeback */
for_each_old_connector_in_state(state, connector, old_con_state, i) {
@ -10012,7 +10070,6 @@ static void amdgpu_dm_commit_streams(struct drm_atomic_state *state,
acrtc->hw_mode = new_crtc_state->mode;
crtc->hwmode = new_crtc_state->mode;
mode_set_reset_required = true;
set_backlight_level = true;
} else if (modereset_required(new_crtc_state)) {
drm_dbg_atomic(dev,
"Atomic commit: RESET. crtc id %d:[%p]\n",
@ -10069,13 +10126,16 @@ static void amdgpu_dm_commit_streams(struct drm_atomic_state *state,
* to fix a flicker issue.
* It will cause the dm->actual_brightness is not the current panel brightness
* level. (the dm->brightness is the correct panel level)
* So we set the backlight level with dm->brightness value after set mode
* So we set the backlight level with dm->brightness value after initial
* set mode. Use restore_backlight flag to avoid setting backlight level
* for every subsequent mode set.
*/
if (set_backlight_level) {
if (dm->restore_backlight) {
for (i = 0; i < dm->num_of_edps; i++) {
if (dm->backlight_dev[i])
amdgpu_dm_backlight_set_level(dm, i, dm->brightness[i]);
}
dm->restore_backlight = false;
}
}
@ -10788,6 +10848,8 @@ static void get_freesync_config_for_crtc(
} else {
config.state = VRR_STATE_INACTIVE;
}
} else {
config.state = VRR_STATE_UNSUPPORTED;
}
out:
new_crtc_state->freesync_config = config;
@ -12689,7 +12751,7 @@ void amdgpu_dm_update_freesync_caps(struct drm_connector *connector,
dm_con_state = to_dm_connector_state(connector->state);
if (!adev->dm.freesync_module)
if (!adev->dm.freesync_module || !dc_supports_vrr(sink->ctx->dce_version))
goto update;
edid = drm_edid_raw(drm_edid); // FIXME: Get rid of drm_edid_raw()
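
Both i2c registrations in this file (the OEM adapter and the per-connector hw i2c) now go through devm_i2c_add_adapter(), tying each adapter's lifetime to the amdgpu device, which is why the dm_oem_i2c_hw_fini() and connector-destroy teardown paths could be dropped. A rough sketch of the devm pattern, using a hypothetical example_oem_i2c wrapper rather than the driver's real structures:

#include <linux/device.h>
#include <linux/i2c.h>
#include <linux/module.h>
#include <linux/slab.h>

/* Hypothetical wrapper: both the memory and the adapter registration are
 * released automatically when the owning device is unbound, so no explicit
 * i2c_del_adapter()/kfree() cleanup path is required.
 */
struct example_oem_i2c {
	struct i2c_adapter base;
};

static const struct i2c_algorithm example_algo; /* placeholder; a real driver fills this in */

static int example_register_oem_i2c(struct device *dev)
{
	struct example_oem_i2c *oem;

	oem = devm_kzalloc(dev, sizeof(*oem), GFP_KERNEL);
	if (!oem)
		return -ENOMEM;

	oem->base.owner = THIS_MODULE;
	oem->base.dev.parent = dev;
	oem->base.algo = &example_algo;
	snprintf(oem->base.name, sizeof(oem->base.name), "example OEM i2c");

	/* Unregistered automatically on driver detach - no *_fini() needed. */
	return devm_i2c_add_adapter(dev, &oem->base);
}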

View File

@ -630,6 +630,13 @@ struct amdgpu_display_manager {
*/
u32 actual_brightness[AMDGPU_DM_MAX_NUM_EDP];
/**
* @restore_backlight:
*
* Flag to indicate whether to restore backlight after modeset.
*/
bool restore_backlight;
/**
* @aux_hpd_discon_quirk:
*
@ -799,6 +806,7 @@ struct amdgpu_dm_connector {
bool fake_enable;
bool force_yuv420_output;
bool force_yuv422_output;
struct dsc_preferred_settings dsc_settings;
union dp_downstream_port_present mst_downstream_port_present;
/* Cached display modes */

View File

@ -317,13 +317,17 @@ static inline int amdgpu_dm_crtc_set_vblank(struct drm_crtc *crtc, bool enable)
dc->config.disable_ips != DMUB_IPS_DISABLE_ALL &&
sr_supported && vblank->config.disable_immediate)
drm_crtc_vblank_restore(crtc);
}
/* vblank irq on -> Only need vupdate irq in vrr mode */
if (amdgpu_dm_crtc_vrr_active(acrtc_state))
rc = amdgpu_dm_crtc_set_vupdate_irq(crtc, true);
} else {
/* vblank irq off -> vupdate irq off */
rc = amdgpu_dm_crtc_set_vupdate_irq(crtc, false);
if (dc_supports_vrr(dm->dc->ctx->dce_version)) {
if (enable) {
/* vblank irq on -> Only need vupdate irq in vrr mode */
if (amdgpu_dm_crtc_vrr_active(acrtc_state))
rc = amdgpu_dm_crtc_set_vupdate_irq(crtc, true);
} else {
/* vblank irq off -> vupdate irq off */
rc = amdgpu_dm_crtc_set_vupdate_irq(crtc, false);
}
}
if (rc)

View File

@ -146,7 +146,7 @@ static void amdgpu_dm_plane_add_modifier(uint64_t **mods, uint64_t *size, uint64
if (*cap - *size < 1) {
uint64_t new_cap = *cap * 2;
uint64_t *new_mods = kmalloc(new_cap * sizeof(uint64_t), GFP_KERNEL);
uint64_t *new_mods = kmalloc_array(new_cap, sizeof(uint64_t), GFP_KERNEL);
if (!new_mods) {
kfree(*mods);
@ -732,7 +732,7 @@ static int amdgpu_dm_plane_get_plane_modifiers(struct amdgpu_device *adev, unsig
if (adev->family < AMDGPU_FAMILY_AI)
return 0;
*mods = kmalloc(capacity * sizeof(uint64_t), GFP_KERNEL);
*mods = kmalloc_array(capacity, sizeof(uint64_t), GFP_KERNEL);
if (plane_type == DRM_PLANE_TYPE_CURSOR) {
amdgpu_dm_plane_add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_LINEAR);
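
Both modifier-array allocations above move from kmalloc(count * sizeof(uint64_t), ...) to kmalloc_array(), which returns NULL instead of silently wrapping when the count * size multiplication would overflow. A small self-contained illustration of the idiom, with a hypothetical grow_u64_array() helper standing in for the plane code:

#include <linux/slab.h>
#include <linux/string.h>
#include <linux/types.h>

/* Grow a u64 array to 'new_cap' entries.  kmalloc_array(n, size, flags)
 * checks n * size for overflow, unlike a hand-written kmalloc(n * size).
 */
static u64 *grow_u64_array(u64 *old, size_t old_len, size_t new_cap)
{
	u64 *buf = kmalloc_array(new_cap, sizeof(*buf), GFP_KERNEL);

	if (!buf)
		return NULL;

	memcpy(buf, old, old_len * sizeof(*old));
	kfree(old);
	return buf;
}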

View File

@ -98,6 +98,7 @@ bool dm_pp_apply_display_requirements(
const struct dm_pp_single_disp_config *dc_cfg =
&pp_display_cfg->disp_configs[i];
adev->pm.pm_display_cfg.displays[i].controller_id = dc_cfg->pipe_idx + 1;
adev->pm.pm_display_cfg.displays[i].pixel_clock = dc_cfg->pixel_clock;
}
amdgpu_dpm_display_configuration_change(adev, &adev->pm.pm_display_cfg);

View File

@ -31,7 +31,7 @@
#include "amdgpu_dm.h"
#include "modules/power/power_helpers.h"
#include "dmub/inc/dmub_cmd.h"
#include "dc/inc/link.h"
#include "dc/inc/link_service.h"
/*
* amdgpu_dm_link_supports_replay() - check if the link supports replay

View File

@ -28,7 +28,7 @@
#include "dccg.h"
#include "clk_mgr_internal.h"
#include "dc_state_priv.h"
#include "link.h"
#include "link_service.h"
#include "dce100/dce_clk_mgr.h"
#include "dce110/dce110_clk_mgr.h"

View File

@ -164,7 +164,7 @@ void dce110_fill_display_configs(
stream->link->cur_link_settings.link_rate;
cfg->link_settings.link_spread =
stream->link->cur_link_settings.link_spread;
cfg->sym_clock = stream->phy_pix_clk;
cfg->pixel_clock = stream->phy_pix_clk;
/* Round v_refresh*/
cfg->v_refresh = stream->timing.pix_clk_100hz * 100;
cfg->v_refresh /= stream->timing.h_total;

View File

@ -47,7 +47,7 @@
#include "dcn30/dcn30_clk_mgr.h"
#include "dc_dmub_srv.h"
#include "link.h"
#include "link_service.h"
#include "logger_types.h"

View File

@ -48,7 +48,7 @@
#include "dcn31/dcn31_clk_mgr.h"
#include "dc_dmub_srv.h"
#include "link.h"
#include "link_service.h"
#include "dcn314_smu.h"

View File

@ -46,7 +46,7 @@
#define DC_LOGGER \
clk_mgr->base.base.ctx->logger
#include "link.h"
#include "link_service.h"
#define TO_CLK_MGR_DCN315(clk_mgr)\
container_of(clk_mgr, struct clk_mgr_dcn315, base)

View File

@ -39,7 +39,7 @@
#include "dcn316_smu.h"
#include "dm_helpers.h"
#include "dc_dmub_srv.h"
#include "link.h"
#include "link_service.h"
// DCN316 this is CLK1 instance
#define MAX_INSTANCE 7

View File

@ -33,7 +33,7 @@
#include "reg_helper.h"
#include "core_types.h"
#include "dm_helpers.h"
#include "link.h"
#include "link_service.h"
#include "dc_state_priv.h"
#include "atomfirmware.h"
#include "dcn32_smu13_driver_if.h"

View File

@ -44,7 +44,7 @@
#include "dcn31/dcn31_clk_mgr.h"
#include "dc_dmub_srv.h"
#include "link.h"
#include "link_service.h"
#include "logger_types.h"
#undef DC_LOGGER

View File

@ -13,7 +13,7 @@
#include "reg_helper.h"
#include "core_types.h"
#include "dm_helpers.h"
#include "link.h"
#include "link_service.h"
#include "dc_state_priv.h"
#include "atomfirmware.h"

View File

@ -60,7 +60,7 @@
#include "link_encoder.h"
#include "link_enc_cfg.h"
#include "link.h"
#include "link_service.h"
#include "dm_helpers.h"
#include "mem_input.h"
@ -5622,8 +5622,8 @@ void dc_allow_idle_optimizations_internal(struct dc *dc, bool allow, char const
subvp_pipe_type[i] = dc_state_get_pipe_subvp_type(context, pipe);
}
}
DC_LOG_DC("%s: allow_idle=%d\n HardMinUClk_Khz=%d HardMinDramclk_Khz=%d\n Pipe_0=%d Pipe_1=%d Pipe_2=%d Pipe_3=%d Pipe_4=%d Pipe_5=%d (caller=%s)\n",
if (!dc->caps.is_apu)
DC_LOG_DC("%s: allow_idle=%d\n HardMinUClk_Khz=%d HardMinDramclk_Khz=%d\n Pipe_0=%d Pipe_1=%d Pipe_2=%d Pipe_3=%d Pipe_4=%d Pipe_5=%d (caller=%s)\n",
__func__, allow, idle_fclk_khz, idle_dramclk_khz, subvp_pipe_type[0], subvp_pipe_type[1], subvp_pipe_type[2],
subvp_pipe_type[3], subvp_pipe_type[4], subvp_pipe_type[5], caller_name);

View File

@ -24,7 +24,7 @@
#include "link_enc_cfg.h"
#include "resource.h"
#include "link.h"
#include "link_service.h"
#define DC_LOGGER dc->ctx->logger

Some files were not shown because too many files have changed in this diff