Merge tag 'amd-drm-next-5.14-2021-06-02' of https://gitlab.freedesktop.org/agd5f/linux into drm-next

amd-drm-next-5.14-2021-06-02:

amdgpu:
- GC/MM register access macro clean up for SR-IOV
- Beige Goby updates
- W=1 Fixes
- Aldebaran fixes
- Misc display fixes
- ACPI ATCS/ATIF handling rework
- SR-IOV fixes
- RAS fixes
- 16bpc fixed point format support
- Initial smartshift support
- RV/PCO power tuning fixes for suspend/resume
- More buffer object subclassing work
- Add new INFO query for additional vbios information
- Add new placement for preemptable SG buffers

amdkfd:
- Misc fixes

radeon:
- W=1 Fixes
- Misc cleanups

UAPI:
- Add new INFO query for additional vbios information
  Useful for debugging vbios-related issues.  Proposed umr patch:
  https://patchwork.freedesktop.org/patch/433297/
  (a minimal userspace query sketch follows this list)
- 16bpc fixed point format support
  IGT test:
  https://lists.freedesktop.org/archives/igt-dev/2021-May/031507.html
  Proposed Vulkan patch:
  a25d480207
- Add a new GEM flag which is only used internally in the kernel driver.  Userspace
  is not allowed to set it.
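
A minimal userspace sketch of the new vbios query. Assumptions: the
drm_amdgpu_info / drm_amdgpu_info_vbios layout as merged into
amdgpu_drm.h and the AMDGPU_INFO_VBIOS / AMDGPU_INFO_VBIOS_INFO
identifiers; error handling is elided.

    #include <stdint.h>
    #include <string.h>
    #include <sys/ioctl.h>
    #include <amdgpu_drm.h> /* libdrm uapi header, usually under /usr/include/libdrm */

    /* Ask the kernel to fill *out with the vbios name, part number,
     * version string and build date (see the amdgpu_info_ioctl hunk below). */
    static int query_vbios_info(int fd, struct drm_amdgpu_info_vbios *out)
    {
            struct drm_amdgpu_info request;

            memset(&request, 0, sizeof(request));
            request.return_pointer = (__u64)(uintptr_t)out;
            request.return_size = sizeof(*out);
            request.query = AMDGPU_INFO_VBIOS;
            request.vbios_info.type = AMDGPU_INFO_VBIOS_INFO;

            return ioctl(fd, DRM_IOCTL_AMDGPU_INFO, &request);
    }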

drm:
- 16bpc fixed point format fourcc
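
A minimal sketch of creating a framebuffer with the new fourcc via
libdrm. Assumptions: DRM_FORMAT_XRGB16161616 is the 16bpc identifier
this series adds to drm_fourcc.h; the BO handle and pitch below are
placeholders for a real allocation.

    #include <stdint.h>
    #include <xf86drmMode.h>
    #include <drm_fourcc.h>

    /* 16 bpc fixed point XRGB is 8 bytes per pixel, hence pitch = w * 8. */
    static int add_16bpc_fb(int fd, uint32_t w, uint32_t h,
                            uint32_t bo_handle, uint32_t *fb_id)
    {
            uint32_t handles[4] = { bo_handle };
            uint32_t pitches[4] = { w * 8 };
            uint32_t offsets[4] = { 0 };

            return drmModeAddFB2(fd, w, h, DRM_FORMAT_XRGB16161616,
                                 handles, pitches, offsets, fb_id, 0);
    }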

Signed-off-by: Dave Airlie <airlied@redhat.com>
From: Alex Deucher <alexander.deucher@amd.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20210602214009.4553-1-alexander.deucher@amd.com

@@ -51,9 +51,10 @@ amdgpu-y += amdgpu_device.o amdgpu_kms.o \
atombios_encoders.o amdgpu_sa.o atombios_i2c.o \
amdgpu_dma_buf.o amdgpu_vm.o amdgpu_ib.o amdgpu_pll.o \
amdgpu_ucode.o amdgpu_bo_list.o amdgpu_ctx.o amdgpu_sync.o \
amdgpu_gtt_mgr.o amdgpu_vram_mgr.o amdgpu_virt.o amdgpu_atomfirmware.o \
amdgpu_vf_error.o amdgpu_sched.o amdgpu_debugfs.o amdgpu_ids.o \
amdgpu_gmc.o amdgpu_mmhub.o amdgpu_xgmi.o amdgpu_csa.o amdgpu_ras.o amdgpu_vm_cpu.o \
amdgpu_gtt_mgr.o amdgpu_preempt_mgr.o amdgpu_vram_mgr.o amdgpu_virt.o \
amdgpu_atomfirmware.o amdgpu_vf_error.o amdgpu_sched.o \
amdgpu_debugfs.o amdgpu_ids.o amdgpu_gmc.o amdgpu_mmhub.o \
amdgpu_xgmi.o amdgpu_csa.o amdgpu_ras.o amdgpu_vm_cpu.o \
amdgpu_vm_sdma.o amdgpu_discovery.o amdgpu_ras_eeprom.o amdgpu_nbio.o \
amdgpu_umc.o smu_v11_0_i2c.o amdgpu_fru_eeprom.o amdgpu_rap.o \
amdgpu_fw_attestation.o amdgpu_securedisplay.o amdgpu_hdp.o


@@ -227,7 +227,7 @@ static int aldebaran_mode2_restore_ip(struct amdgpu_device *adev)
break;
default:
break;
};
}
}
/* Reinit NBIF block */


@@ -130,6 +130,13 @@ struct amdgpu_mgpu_info
bool pending_reset;
};
enum amdgpu_ss {
AMDGPU_SS_DRV_LOAD,
AMDGPU_SS_DEV_D0,
AMDGPU_SS_DEV_D3,
AMDGPU_SS_DRV_UNLOAD
};
struct amdgpu_watchdog_timer
{
bool timeout_fatal_disable;
@@ -268,7 +275,6 @@ struct amdgpu_job;
struct amdgpu_irq_src;
struct amdgpu_fpriv;
struct amdgpu_bo_va_mapping;
struct amdgpu_atif;
struct kfd_vm_fault_info;
struct amdgpu_hive_info;
struct amdgpu_reset_context;
@@ -682,20 +688,6 @@ struct amdgpu_vram_scratch {
u64 gpu_addr;
};
/*
* ACPI
*/
struct amdgpu_atcs_functions {
bool get_ext_state;
bool pcie_perf_req;
bool pcie_dev_rdy;
bool pcie_bus_width;
};
struct amdgpu_atcs {
struct amdgpu_atcs_functions functions;
};
/*
* CGS
*/
@@ -825,8 +817,6 @@ struct amdgpu_device {
struct notifier_block acpi_nb;
struct amdgpu_i2c_chan *i2c_bus[AMDGPU_MAX_I2C_BUS];
struct debugfs_blob_wrapper debugfs_vbios_blob;
struct amdgpu_atif *atif;
struct amdgpu_atcs atcs;
struct mutex srbm_mutex;
/* GRBM index mutex. Protects concurrent access to GRBM index */
struct mutex grbm_idx_mutex;
@@ -1146,6 +1136,7 @@ int emu_soc_asic_init(struct amdgpu_device *adev);
* Registers read & write functions.
*/
#define AMDGPU_REGS_NO_KIQ (1<<1)
#define AMDGPU_REGS_RLC (1<<2)
#define RREG32_NO_KIQ(reg) amdgpu_device_rreg(adev, (reg), AMDGPU_REGS_NO_KIQ)
#define WREG32_NO_KIQ(reg, v) amdgpu_device_wreg(adev, (reg), (v), AMDGPU_REGS_NO_KIQ)
@@ -1282,6 +1273,7 @@ int amdgpu_device_mode1_reset(struct amdgpu_device *adev);
bool amdgpu_device_supports_atpx(struct drm_device *dev);
bool amdgpu_device_supports_px(struct drm_device *dev);
bool amdgpu_device_supports_boco(struct drm_device *dev);
bool amdgpu_device_supports_smart_shift(struct drm_device *dev);
bool amdgpu_device_supports_baco(struct drm_device *dev);
bool amdgpu_device_is_peer_accessible(struct amdgpu_device *adev,
struct amdgpu_device *peer_adev);
@@ -1356,21 +1348,38 @@ struct amdgpu_afmt_acr {
struct amdgpu_afmt_acr amdgpu_afmt_acr(uint32_t clock);
/* amdgpu_acpi.c */
/* ATCS Device/Driver State */
#define AMDGPU_ATCS_PSC_DEV_STATE_D0 0
#define AMDGPU_ATCS_PSC_DEV_STATE_D3_HOT 3
#define AMDGPU_ATCS_PSC_DRV_STATE_OPR 0
#define AMDGPU_ATCS_PSC_DRV_STATE_NOT_OPR 1
#if defined(CONFIG_ACPI)
int amdgpu_acpi_init(struct amdgpu_device *adev);
void amdgpu_acpi_fini(struct amdgpu_device *adev);
bool amdgpu_acpi_is_pcie_performance_request_supported(struct amdgpu_device *adev);
bool amdgpu_acpi_is_power_shift_control_supported(void);
int amdgpu_acpi_pcie_performance_request(struct amdgpu_device *adev,
u8 perf_req, bool advertise);
int amdgpu_acpi_power_shift_control(struct amdgpu_device *adev,
u8 dev_state, bool drv_state);
int amdgpu_acpi_smart_shift_update(struct drm_device *dev, enum amdgpu_ss ss_state);
int amdgpu_acpi_pcie_notify_device_ready(struct amdgpu_device *adev);
void amdgpu_acpi_get_backlight_caps(struct amdgpu_device *adev,
struct amdgpu_dm_backlight_caps *caps);
void amdgpu_acpi_get_backlight_caps(struct amdgpu_dm_backlight_caps *caps);
bool amdgpu_acpi_is_s0ix_supported(struct amdgpu_device *adev);
void amdgpu_acpi_detect(void);
#else
static inline int amdgpu_acpi_init(struct amdgpu_device *adev) { return 0; }
static inline void amdgpu_acpi_fini(struct amdgpu_device *adev) { }
static inline bool amdgpu_acpi_is_s0ix_supported(struct amdgpu_device *adev) { return false; }
static inline void amdgpu_acpi_detect(void) { }
static inline bool amdgpu_acpi_is_power_shift_control_supported(void) { return false; }
static inline int amdgpu_acpi_power_shift_control(struct amdgpu_device *adev,
u8 dev_state, bool drv_state) { return 0; }
static inline int amdgpu_acpi_smart_shift_update(struct drm_device *dev,
enum amdgpu_ss ss_state) { return 0; }
#endif
int amdgpu_cs_find_mapping(struct amdgpu_cs_parser *parser,


@@ -71,12 +71,31 @@ struct amdgpu_atif {
struct amdgpu_dm_backlight_caps backlight_caps;
};
struct amdgpu_atcs_functions {
bool get_ext_state;
bool pcie_perf_req;
bool pcie_dev_rdy;
bool pcie_bus_width;
bool power_shift_control;
};
struct amdgpu_atcs {
acpi_handle handle;
struct amdgpu_atcs_functions functions;
};
static struct amdgpu_acpi_priv {
struct amdgpu_atif atif;
struct amdgpu_atcs atcs;
} amdgpu_acpi_priv;
/* Call the ATIF method
*/
/**
* amdgpu_atif_call - call an ATIF method
*
* @atif: acpi handle
* @atif: atif structure
* @function: the ATIF function to execute
* @params: ATIF function params
*
@@ -207,35 +226,6 @@ out:
return err;
}
static acpi_handle amdgpu_atif_probe_handle(acpi_handle dhandle)
{
acpi_handle handle = NULL;
char acpi_method_name[255] = { 0 };
struct acpi_buffer buffer = { sizeof(acpi_method_name), acpi_method_name };
acpi_status status;
/* For PX/HG systems, ATIF and ATPX are in the iGPU's namespace, on dGPU only
* systems, ATIF is in the dGPU's namespace.
*/
status = acpi_get_handle(dhandle, "ATIF", &handle);
if (ACPI_SUCCESS(status))
goto out;
if (amdgpu_has_atpx()) {
status = acpi_get_handle(amdgpu_atpx_get_dhandle(), "ATIF",
&handle);
if (ACPI_SUCCESS(status))
goto out;
}
DRM_DEBUG_DRIVER("No ATIF handle found\n");
return NULL;
out:
acpi_get_name(handle, ACPI_FULL_PATHNAME, &buffer);
DRM_DEBUG_DRIVER("Found ATIF handle %s\n", acpi_method_name);
return handle;
}
/**
* amdgpu_atif_get_notification_params - determine notify configuration
*
@@ -414,7 +404,7 @@ out:
static int amdgpu_atif_handler(struct amdgpu_device *adev,
struct acpi_bus_event *event)
{
struct amdgpu_atif *atif = adev->atif;
struct amdgpu_atif *atif = &amdgpu_acpi_priv.atif;
int count;
DRM_DEBUG_DRIVER("event, device_class = %s, type = %#x\n",
@@ -424,8 +414,7 @@ static int amdgpu_atif_handler(struct amdgpu_device *adev,
return NOTIFY_DONE;
/* Is this actually our event? */
if (!atif ||
!atif->notification_cfg.enabled ||
if (!atif->notification_cfg.enabled ||
event->type != atif->notification_cfg.command_code) {
/* These events will generate keypresses otherwise */
if (event->type == ACPI_VIDEO_NOTIFY_PROBE)
@@ -485,14 +474,15 @@ static int amdgpu_atif_handler(struct amdgpu_device *adev,
/**
* amdgpu_atcs_call - call an ATCS method
*
* @handle: acpi handle
* @atcs: atcs structure
* @function: the ATCS function to execute
* @params: ATCS function params
*
* Executes the requested ATCS function (all asics).
* Returns a pointer to the acpi output buffer.
*/
static union acpi_object *amdgpu_atcs_call(acpi_handle handle, int function,
static union acpi_object *amdgpu_atcs_call(struct amdgpu_atcs *atcs,
int function,
struct acpi_buffer *params)
{
acpi_status status;
@@ -516,7 +506,7 @@ static union acpi_object *amdgpu_atcs_call(acpi_handle handle, int function,
atcs_arg_elements[1].integer.value = 0;
}
status = acpi_evaluate_object(handle, "ATCS", &atcs_arg, &buffer);
status = acpi_evaluate_object(atcs->handle, NULL, &atcs_arg, &buffer);
/* Fail only if calling the method fails and ATIF is supported */
if (ACPI_FAILURE(status) && status != AE_NOT_FOUND) {
@@ -545,12 +535,12 @@ static void amdgpu_atcs_parse_functions(struct amdgpu_atcs_functions *f, u32 mas
f->pcie_perf_req = mask & ATCS_PCIE_PERFORMANCE_REQUEST_SUPPORTED;
f->pcie_dev_rdy = mask & ATCS_PCIE_DEVICE_READY_NOTIFICATION_SUPPORTED;
f->pcie_bus_width = mask & ATCS_SET_PCIE_BUS_WIDTH_SUPPORTED;
f->power_shift_control = mask & ATCS_SET_POWER_SHIFT_CONTROL_SUPPORTED;
}
/**
* amdgpu_atcs_verify_interface - verify ATCS
*
* @handle: acpi handle
* @atcs: amdgpu atcs struct
*
* Execute the ATCS_FUNCTION_VERIFY_INTERFACE ATCS function
@@ -558,15 +548,14 @@ static void amdgpu_atcs_parse_functions(struct amdgpu_atcs_functions *f, u32 mas
* (all asics).
* returns 0 on success, error on failure.
*/
static int amdgpu_atcs_verify_interface(acpi_handle handle,
struct amdgpu_atcs *atcs)
static int amdgpu_atcs_verify_interface(struct amdgpu_atcs *atcs)
{
union acpi_object *info;
struct atcs_verify_interface output;
size_t size;
int err = 0;
info = amdgpu_atcs_call(handle, ATCS_FUNCTION_VERIFY_INTERFACE, NULL);
info = amdgpu_atcs_call(atcs, ATCS_FUNCTION_VERIFY_INTERFACE, NULL);
if (!info)
return -EIO;
@@ -603,7 +592,7 @@ out:
*/
bool amdgpu_acpi_is_pcie_performance_request_supported(struct amdgpu_device *adev)
{
struct amdgpu_atcs *atcs = &adev->atcs;
struct amdgpu_atcs *atcs = &amdgpu_acpi_priv.atcs;
if (atcs->functions.pcie_perf_req && atcs->functions.pcie_dev_rdy)
return true;
@@ -611,6 +600,18 @@ bool amdgpu_acpi_is_pcie_performance_request_supported(struct amdgpu_device *ade
return false;
}
/**
* amdgpu_acpi_is_power_shift_control_supported
*
* Check if the ATCS power shift control method
* is supported.
* returns true if supported, false if not.
*/
bool amdgpu_acpi_is_power_shift_control_supported(void)
{
return amdgpu_acpi_priv.atcs.functions.power_shift_control;
}
/**
* amdgpu_acpi_pcie_notify_device_ready
*
@@ -622,19 +623,13 @@ bool amdgpu_acpi_is_pcie_performance_request_supported(struct amdgpu_device *ade
*/
int amdgpu_acpi_pcie_notify_device_ready(struct amdgpu_device *adev)
{
acpi_handle handle;
union acpi_object *info;
struct amdgpu_atcs *atcs = &adev->atcs;
/* Get the device handle */
handle = ACPI_HANDLE(&adev->pdev->dev);
if (!handle)
return -EINVAL;
struct amdgpu_atcs *atcs = &amdgpu_acpi_priv.atcs;
if (!atcs->functions.pcie_dev_rdy)
return -EINVAL;
info = amdgpu_atcs_call(handle, ATCS_FUNCTION_PCIE_DEVICE_READY_NOTIFICATION, NULL);
info = amdgpu_atcs_call(atcs, ATCS_FUNCTION_PCIE_DEVICE_READY_NOTIFICATION, NULL);
if (!info)
return -EIO;
@@ -657,9 +652,8 @@ int amdgpu_acpi_pcie_notify_device_ready(struct amdgpu_device *adev)
int amdgpu_acpi_pcie_performance_request(struct amdgpu_device *adev,
u8 perf_req, bool advertise)
{
acpi_handle handle;
union acpi_object *info;
struct amdgpu_atcs *atcs = &adev->atcs;
struct amdgpu_atcs *atcs = &amdgpu_acpi_priv.atcs;
struct atcs_pref_req_input atcs_input;
struct atcs_pref_req_output atcs_output;
struct acpi_buffer params;
@@ -669,11 +663,6 @@ int amdgpu_acpi_pcie_performance_request(struct amdgpu_device *adev,
if (amdgpu_acpi_pcie_notify_device_ready(adev))
return -EINVAL;
/* Get the device handle */
handle = ACPI_HANDLE(&adev->pdev->dev);
if (!handle)
return -EINVAL;
if (!atcs->functions.pcie_perf_req)
return -EINVAL;
@@ -691,7 +680,7 @@ int amdgpu_acpi_pcie_performance_request(struct amdgpu_device *adev,
params.pointer = &atcs_input;
while (retry--) {
info = amdgpu_atcs_call(handle, ATCS_FUNCTION_PCIE_PERFORMANCE_REQUEST, &params);
info = amdgpu_atcs_call(atcs, ATCS_FUNCTION_PCIE_PERFORMANCE_REQUEST, &params);
if (!info)
return -EIO;
@@ -724,6 +713,96 @@ int amdgpu_acpi_pcie_performance_request(struct amdgpu_device *adev,
return 0;
}
/**
* amdgpu_acpi_power_shift_control
*
* @adev: amdgpu_device pointer
* @dev_state: device acpi state
* @drv_state: driver state
*
* Executes the POWER_SHIFT_CONTROL method to
* communicate current dGPU device state and
* driver state to APU/SBIOS.
* returns 0 on success, error on failure.
*/
int amdgpu_acpi_power_shift_control(struct amdgpu_device *adev,
u8 dev_state, bool drv_state)
{
union acpi_object *info;
struct amdgpu_atcs *atcs = &amdgpu_acpi_priv.atcs;
struct atcs_pwr_shift_input atcs_input;
struct acpi_buffer params;
if (!amdgpu_acpi_is_power_shift_control_supported())
return -EINVAL;
atcs_input.size = sizeof(struct atcs_pwr_shift_input);
/* dGPU id (bit 2-0: func num, 7-3: dev num, 15-8: bus num) */
atcs_input.dgpu_id = adev->pdev->devfn | (adev->pdev->bus->number << 8);
atcs_input.dev_acpi_state = dev_state;
atcs_input.drv_state = drv_state;
params.length = sizeof(struct atcs_pwr_shift_input);
params.pointer = &atcs_input;
info = amdgpu_atcs_call(atcs, ATCS_FUNCTION_POWER_SHIFT_CONTROL, &params);
if (!info) {
DRM_ERROR("ATCS PSC update failed\n");
return -EIO;
}
return 0;
}
/**
* amdgpu_acpi_smart_shift_update - update dGPU device state to SBIOS
*
* @dev: drm_device pointer
* @ss_state: current smart shift event
*
* returns 0 on success,
* otherwise return error number.
*/
int amdgpu_acpi_smart_shift_update(struct drm_device *dev, enum amdgpu_ss ss_state)
{
struct amdgpu_device *adev = drm_to_adev(dev);
int r;
if (!amdgpu_device_supports_smart_shift(dev))
return 0;
switch (ss_state) {
/* SBIOS trigger “stop”, “enable” and “start” at D0, Driver Operational.
* SBIOS trigger stop at D3, Driver Not Operational.
* SBIOS trigger stop and disable at D0, Driver NOT operational.
*/
case AMDGPU_SS_DRV_LOAD:
r = amdgpu_acpi_power_shift_control(adev,
AMDGPU_ATCS_PSC_DEV_STATE_D0,
AMDGPU_ATCS_PSC_DRV_STATE_OPR);
break;
case AMDGPU_SS_DEV_D0:
r = amdgpu_acpi_power_shift_control(adev,
AMDGPU_ATCS_PSC_DEV_STATE_D0,
AMDGPU_ATCS_PSC_DRV_STATE_OPR);
break;
case AMDGPU_SS_DEV_D3:
r = amdgpu_acpi_power_shift_control(adev,
AMDGPU_ATCS_PSC_DEV_STATE_D3_HOT,
AMDGPU_ATCS_PSC_DRV_STATE_NOT_OPR);
break;
case AMDGPU_SS_DRV_UNLOAD:
r = amdgpu_acpi_power_shift_control(adev,
AMDGPU_ATCS_PSC_DEV_STATE_D0,
AMDGPU_ATCS_PSC_DRV_STATE_NOT_OPR);
break;
default:
return -EINVAL;
}
return r;
}
/**
* amdgpu_acpi_event - handle notify events
*
@@ -767,50 +846,15 @@ static int amdgpu_acpi_event(struct notifier_block *nb,
*/
int amdgpu_acpi_init(struct amdgpu_device *adev)
{
acpi_handle handle, atif_handle;
struct amdgpu_atif *atif;
struct amdgpu_atcs *atcs = &adev->atcs;
int ret;
/* Get the device handle */
handle = ACPI_HANDLE(&adev->pdev->dev);
if (!adev->bios || !handle)
return 0;
/* Call the ATCS method */
ret = amdgpu_atcs_verify_interface(handle, atcs);
if (ret) {
DRM_DEBUG_DRIVER("Call to ATCS verify_interface failed: %d\n", ret);
}
/* Probe for ATIF, and initialize it if found */
atif_handle = amdgpu_atif_probe_handle(handle);
if (!atif_handle)
goto out;
atif = kzalloc(sizeof(*atif), GFP_KERNEL);
if (!atif) {
DRM_WARN("Not enough memory to initialize ATIF\n");
goto out;
}
atif->handle = atif_handle;
/* Call the ATIF method */
ret = amdgpu_atif_verify_interface(atif);
if (ret) {
DRM_DEBUG_DRIVER("Call to ATIF verify_interface failed: %d\n", ret);
kfree(atif);
goto out;
}
adev->atif = atif;
struct amdgpu_atif *atif = &amdgpu_acpi_priv.atif;
#if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) || defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
if (atif->notifications.brightness_change) {
if (amdgpu_device_has_dc_support(adev)) {
#if defined(CONFIG_DRM_AMD_DC)
struct amdgpu_display_manager *dm = &adev->dm;
atif->bd = dm->backlight_dev;
if (dm->backlight_dev)
atif->bd = dm->backlight_dev;
#endif
} else {
struct drm_encoder *tmp;
@@ -832,6 +876,129 @@ int amdgpu_acpi_init(struct amdgpu_device *adev)
}
}
#endif
adev->acpi_nb.notifier_call = amdgpu_acpi_event;
register_acpi_notifier(&adev->acpi_nb);
return 0;
}
void amdgpu_acpi_get_backlight_caps(struct amdgpu_dm_backlight_caps *caps)
{
struct amdgpu_atif *atif = &amdgpu_acpi_priv.atif;
caps->caps_valid = atif->backlight_caps.caps_valid;
caps->min_input_signal = atif->backlight_caps.min_input_signal;
caps->max_input_signal = atif->backlight_caps.max_input_signal;
}
/**
* amdgpu_acpi_fini - tear down driver acpi support
*
* @adev: amdgpu_device pointer
*
* Unregisters with the acpi notifier chain (all asics).
*/
void amdgpu_acpi_fini(struct amdgpu_device *adev)
{
unregister_acpi_notifier(&adev->acpi_nb);
}
/**
* amdgpu_atif_pci_probe_handle - look up the ATIF handle
*
* @pdev: pci device
*
* Look up the ATIF handles (all asics).
* Returns true if the handle is found, false if not.
*/
static bool amdgpu_atif_pci_probe_handle(struct pci_dev *pdev)
{
char acpi_method_name[255] = { 0 };
struct acpi_buffer buffer = {sizeof(acpi_method_name), acpi_method_name};
acpi_handle dhandle, atif_handle;
acpi_status status;
int ret;
dhandle = ACPI_HANDLE(&pdev->dev);
if (!dhandle)
return false;
status = acpi_get_handle(dhandle, "ATIF", &atif_handle);
if (ACPI_FAILURE(status)) {
return false;
}
amdgpu_acpi_priv.atif.handle = atif_handle;
acpi_get_name(amdgpu_acpi_priv.atif.handle, ACPI_FULL_PATHNAME, &buffer);
DRM_DEBUG_DRIVER("Found ATIF handle %s\n", acpi_method_name);
ret = amdgpu_atif_verify_interface(&amdgpu_acpi_priv.atif);
if (ret) {
amdgpu_acpi_priv.atif.handle = 0;
return false;
}
return true;
}
/**
* amdgpu_atcs_pci_probe_handle - look up the ATCS handle
*
* @pdev: pci device
*
* Look up the ATCS handles (all asics).
* Returns true if the handle is found, false if not.
*/
static bool amdgpu_atcs_pci_probe_handle(struct pci_dev *pdev)
{
char acpi_method_name[255] = { 0 };
struct acpi_buffer buffer = { sizeof(acpi_method_name), acpi_method_name };
acpi_handle dhandle, atcs_handle;
acpi_status status;
int ret;
dhandle = ACPI_HANDLE(&pdev->dev);
if (!dhandle)
return false;
status = acpi_get_handle(dhandle, "ATCS", &atcs_handle);
if (ACPI_FAILURE(status)) {
return false;
}
amdgpu_acpi_priv.atcs.handle = atcs_handle;
acpi_get_name(amdgpu_acpi_priv.atcs.handle, ACPI_FULL_PATHNAME, &buffer);
DRM_DEBUG_DRIVER("Found ATCS handle %s\n", acpi_method_name);
ret = amdgpu_atcs_verify_interface(&amdgpu_acpi_priv.atcs);
if (ret) {
amdgpu_acpi_priv.atcs.handle = 0;
return false;
}
return true;
}
/*
* amdgpu_acpi_detect - detect ACPI ATIF/ATCS methods
*
* Check if we have the ATIF/ATCS methods and populate
* the structures in the driver.
*/
void amdgpu_acpi_detect(void)
{
struct amdgpu_atif *atif = &amdgpu_acpi_priv.atif;
struct amdgpu_atcs *atcs = &amdgpu_acpi_priv.atcs;
struct pci_dev *pdev = NULL;
int ret;
while ((pdev = pci_get_class(PCI_CLASS_DISPLAY_VGA << 8, pdev)) != NULL) {
if (!atif->handle)
amdgpu_atif_pci_probe_handle(pdev);
if (!atcs->handle)
amdgpu_atcs_pci_probe_handle(pdev);
}
while ((pdev = pci_get_class(PCI_CLASS_DISPLAY_OTHER << 8, pdev)) != NULL) {
if (!atif->handle)
amdgpu_atif_pci_probe_handle(pdev);
if (!atcs->handle)
amdgpu_atcs_pci_probe_handle(pdev);
}
if (atif->functions.sbios_requests && !atif->functions.system_params) {
/* XXX check this workaround, if sbios request function is
@@ -861,37 +1028,6 @@ int amdgpu_acpi_init(struct amdgpu_device *adev)
} else {
atif->backlight_caps.caps_valid = false;
}
out:
adev->acpi_nb.notifier_call = amdgpu_acpi_event;
register_acpi_notifier(&adev->acpi_nb);
return ret;
}
void amdgpu_acpi_get_backlight_caps(struct amdgpu_device *adev,
struct amdgpu_dm_backlight_caps *caps)
{
if (!adev->atif) {
caps->caps_valid = false;
return;
}
caps->caps_valid = adev->atif->backlight_caps.caps_valid;
caps->min_input_signal = adev->atif->backlight_caps.min_input_signal;
caps->max_input_signal = adev->atif->backlight_caps.max_input_signal;
}
/**
* amdgpu_acpi_fini - tear down driver acpi support
*
* @adev: amdgpu_device pointer
*
* Unregisters with the acpi notifier chain (all asics).
*/
void amdgpu_acpi_fini(struct amdgpu_device *adev)
{
unregister_acpi_notifier(&adev->acpi_nb);
kfree(adev->atif);
}
/**


@@ -96,8 +96,8 @@ static void kgd_program_sh_mem_settings(struct kgd_dev *kgd, uint32_t vmid,
lock_srbm(kgd, 0, 0, 0, vmid);
WREG32(SOC15_REG_OFFSET(GC, 0, mmSH_MEM_CONFIG), sh_mem_config);
WREG32(SOC15_REG_OFFSET(GC, 0, mmSH_MEM_BASES), sh_mem_bases);
WREG32_SOC15(GC, 0, mmSH_MEM_CONFIG, sh_mem_config);
WREG32_SOC15(GC, 0, mmSH_MEM_BASES, sh_mem_bases);
/* APE1 no longer exists on GFX9 */
unlock_srbm(kgd);
@@ -161,7 +161,7 @@ static int kgd_init_interrupts(struct kgd_dev *kgd, uint32_t pipe_id)
lock_srbm(kgd, mec, pipe, 0, 0);
WREG32(SOC15_REG_OFFSET(GC, 0, mmCPC_INT_CNTL),
WREG32_SOC15(GC, 0, mmCPC_INT_CNTL,
CP_INT_CNTL_RING0__TIME_STAMP_INT_ENABLE_MASK |
CP_INT_CNTL_RING0__OPCODE_ERROR_INT_ENABLE_MASK);
@@ -239,13 +239,13 @@ static int kgd_hqd_load(struct kgd_dev *kgd, void *mqd, uint32_t pipe_id,
for (reg = hqd_base;
reg <= SOC15_REG_OFFSET(GC, 0, mmCP_HQD_PQ_WPTR_HI); reg++)
WREG32(reg, mqd_hqd[reg - hqd_base]);
WREG32_SOC15_IP(GC, reg, mqd_hqd[reg - hqd_base]);
/* Activate doorbell logic before triggering WPTR poll. */
data = REG_SET_FIELD(m->cp_hqd_pq_doorbell_control,
CP_HQD_PQ_DOORBELL_CONTROL, DOORBELL_EN, 1);
WREG32(SOC15_REG_OFFSET(GC, 0, mmCP_HQD_PQ_DOORBELL_CONTROL), data);
WREG32_SOC15(GC, 0, mmCP_HQD_PQ_DOORBELL_CONTROL, data);
if (wptr) {
/* Don't read wptr with get_user because the user
@@ -274,27 +274,27 @@ static int kgd_hqd_load(struct kgd_dev *kgd, void *mqd, uint32_t pipe_id,
guessed_wptr += m->cp_hqd_pq_wptr_lo & ~(queue_size - 1);
guessed_wptr += (uint64_t)m->cp_hqd_pq_wptr_hi << 32;
WREG32(SOC15_REG_OFFSET(GC, 0, mmCP_HQD_PQ_WPTR_LO),
WREG32_SOC15(GC, 0, mmCP_HQD_PQ_WPTR_LO,
lower_32_bits(guessed_wptr));
WREG32(SOC15_REG_OFFSET(GC, 0, mmCP_HQD_PQ_WPTR_HI),
WREG32_SOC15(GC, 0, mmCP_HQD_PQ_WPTR_HI,
upper_32_bits(guessed_wptr));
WREG32(SOC15_REG_OFFSET(GC, 0, mmCP_HQD_PQ_WPTR_POLL_ADDR),
WREG32_SOC15(GC, 0, mmCP_HQD_PQ_WPTR_POLL_ADDR,
lower_32_bits((uint64_t)wptr));
WREG32(SOC15_REG_OFFSET(GC, 0, mmCP_HQD_PQ_WPTR_POLL_ADDR_HI),
WREG32_SOC15(GC, 0, mmCP_HQD_PQ_WPTR_POLL_ADDR_HI,
upper_32_bits((uint64_t)wptr));
pr_debug("%s setting CP_PQ_WPTR_POLL_CNTL1 to %x\n", __func__,
(uint32_t)get_queue_mask(adev, pipe_id, queue_id));
WREG32(SOC15_REG_OFFSET(GC, 0, mmCP_PQ_WPTR_POLL_CNTL1),
WREG32_SOC15(GC, 0, mmCP_PQ_WPTR_POLL_CNTL1,
(uint32_t)get_queue_mask(adev, pipe_id, queue_id));
}
/* Start the EOP fetcher */
WREG32(SOC15_REG_OFFSET(GC, 0, mmCP_HQD_EOP_RPTR),
WREG32_SOC15(GC, 0, mmCP_HQD_EOP_RPTR,
REG_SET_FIELD(m->cp_hqd_eop_rptr,
CP_HQD_EOP_RPTR, INIT_FETCHER, 1));
data = REG_SET_FIELD(m->cp_hqd_active, CP_HQD_ACTIVE, ACTIVE, 1);
WREG32(SOC15_REG_OFFSET(GC, 0, mmCP_HQD_ACTIVE), data);
WREG32_SOC15(GC, 0, mmCP_HQD_ACTIVE, data);
release_queue(kgd);
@@ -365,7 +365,7 @@ static int kgd_hqd_dump(struct kgd_dev *kgd,
if (WARN_ON_ONCE(i >= HQD_N_REGS)) \
break; \
(*dump)[i][0] = (addr) << 2; \
(*dump)[i++][1] = RREG32(addr); \
(*dump)[i++][1] = RREG32_SOC15_IP(GC, addr); \
} while (0)
*dump = kmalloc(HQD_N_REGS*2*sizeof(uint32_t), GFP_KERNEL);
@@ -497,13 +497,13 @@ static bool kgd_hqd_is_occupied(struct kgd_dev *kgd, uint64_t queue_address,
uint32_t low, high;
acquire_queue(kgd, pipe_id, queue_id);
act = RREG32(SOC15_REG_OFFSET(GC, 0, mmCP_HQD_ACTIVE));
act = RREG32_SOC15(GC, 0, mmCP_HQD_ACTIVE);
if (act) {
low = lower_32_bits(queue_address >> 8);
high = upper_32_bits(queue_address >> 8);
if (low == RREG32(SOC15_REG_OFFSET(GC, 0, mmCP_HQD_PQ_BASE)) &&
high == RREG32(SOC15_REG_OFFSET(GC, 0, mmCP_HQD_PQ_BASE_HI)))
if (low == RREG32_SOC15(GC, 0, mmCP_HQD_PQ_BASE) &&
high == RREG32_SOC15(GC, 0, mmCP_HQD_PQ_BASE_HI))
retval = true;
}
release_queue(kgd);
@@ -621,11 +621,11 @@ loop:
preempt_enable();
#endif
WREG32(SOC15_REG_OFFSET(GC, 0, mmCP_HQD_DEQUEUE_REQUEST), type);
WREG32_SOC15(GC, 0, mmCP_HQD_DEQUEUE_REQUEST, type);
end_jiffies = (utimeout * HZ / 1000) + jiffies;
while (true) {
temp = RREG32(SOC15_REG_OFFSET(GC, 0, mmCP_HQD_ACTIVE));
temp = RREG32_SOC15(GC, 0, mmCP_HQD_ACTIVE);
if (!(temp & CP_HQD_ACTIVE__ACTIVE_MASK))
break;
if (time_after(jiffies, end_jiffies)) {
@@ -716,8 +716,8 @@ static int kgd_wave_control_execute(struct kgd_dev *kgd,
mutex_lock(&adev->grbm_idx_mutex);
WREG32(SOC15_REG_OFFSET(GC, 0, mmGRBM_GFX_INDEX), gfx_index_val);
WREG32(SOC15_REG_OFFSET(GC, 0, mmSQ_CMD), sq_cmd);
WREG32_SOC15(GC, 0, mmGRBM_GFX_INDEX, gfx_index_val);
WREG32_SOC15(GC, 0, mmSQ_CMD, sq_cmd);
data = REG_SET_FIELD(data, GRBM_GFX_INDEX,
INSTANCE_BROADCAST_WRITES, 1);
@@ -726,7 +726,7 @@ static int kgd_wave_control_execute(struct kgd_dev *kgd,
data = REG_SET_FIELD(data, GRBM_GFX_INDEX,
SE_BROADCAST_WRITES, 1);
WREG32(SOC15_REG_OFFSET(GC, 0, mmGRBM_GFX_INDEX), data);
WREG32_SOC15(GC, 0, mmGRBM_GFX_INDEX, data);
mutex_unlock(&adev->grbm_idx_mutex);
return 0;


@@ -156,16 +156,16 @@ static uint32_t get_sdma_rlc_reg_offset(struct amdgpu_device *adev,
mmSDMA0_RLC0_RB_CNTL) - mmSDMA0_RLC0_RB_CNTL;
break;
case 1:
sdma_engine_reg_base = SOC15_REG_OFFSET(SDMA1, 0,
sdma_engine_reg_base = SOC15_REG_OFFSET(SDMA0, 0,
mmSDMA1_RLC0_RB_CNTL) - mmSDMA0_RLC0_RB_CNTL;
break;
case 2:
sdma_engine_reg_base = SOC15_REG_OFFSET(SDMA2, 0,
mmSDMA2_RLC0_RB_CNTL) - mmSDMA2_RLC0_RB_CNTL;
sdma_engine_reg_base = SOC15_REG_OFFSET(SDMA0, 0,
mmSDMA2_RLC0_RB_CNTL) - mmSDMA0_RLC0_RB_CNTL;
break;
case 3:
sdma_engine_reg_base = SOC15_REG_OFFSET(SDMA3, 0,
mmSDMA3_RLC0_RB_CNTL) - mmSDMA2_RLC0_RB_CNTL;
sdma_engine_reg_base = SOC15_REG_OFFSET(SDMA0, 0,
mmSDMA3_RLC0_RB_CNTL) - mmSDMA0_RLC0_RB_CNTL;
break;
}
@@ -450,7 +450,7 @@ static int hqd_sdma_dump_v10_3(struct kgd_dev *kgd,
engine_id, queue_id);
uint32_t i = 0, reg;
#undef HQD_N_REGS
#define HQD_N_REGS (19+6+7+10)
#define HQD_N_REGS (19+6+7+12)
*dump = kmalloc(HQD_N_REGS*2*sizeof(uint32_t), GFP_KERNEL);
if (*dump == NULL)


@@ -621,14 +621,13 @@ kfd_mem_attach_userptr(struct amdgpu_device *adev, struct kgd_mem *mem,
ret = amdgpu_gem_object_create(adev, bo_size, 1,
AMDGPU_GEM_DOMAIN_CPU,
0, ttm_bo_type_sg,
mem->bo->tbo.base.resv,
AMDGPU_GEM_CREATE_PREEMPTIBLE,
ttm_bo_type_sg, mem->bo->tbo.base.resv,
&gobj);
amdgpu_bo_unreserve(mem->bo);
if (ret)
return ret;
amdgpu_bo_unreserve(mem->bo);
*bo = gem_to_amdgpu_bo(gobj);
(*bo)->parent = amdgpu_bo_ref(mem->bo);
@@ -640,14 +639,16 @@ kfd_mem_attach_dmabuf(struct amdgpu_device *adev, struct kgd_mem *mem,
struct amdgpu_bo **bo)
{
struct drm_gem_object *gobj;
int ret;
if (!mem->dmabuf) {
mem->dmabuf = amdgpu_gem_prime_export(&mem->bo->tbo.base,
mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_WRITABLE ?
DRM_RDWR : 0);
if (IS_ERR(mem->dmabuf)) {
ret = PTR_ERR(mem->dmabuf);
mem->dmabuf = NULL;
return PTR_ERR(mem->dmabuf);
return ret;
}
}
@@ -662,6 +663,7 @@ kfd_mem_attach_dmabuf(struct amdgpu_device *adev, struct kgd_mem *mem,
dma_buf_put(mem->dmabuf);
*bo = gem_to_amdgpu_bo(gobj);
(*bo)->flags |= AMDGPU_GEM_CREATE_PREEMPTIBLE;
(*bo)->parent = amdgpu_bo_ref(mem->bo);
return 0;
@@ -1410,7 +1412,7 @@ int amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu(
} else if (flags & KFD_IOC_ALLOC_MEM_FLAGS_USERPTR) {
domain = AMDGPU_GEM_DOMAIN_GTT;
alloc_domain = AMDGPU_GEM_DOMAIN_CPU;
alloc_flags = 0;
alloc_flags = AMDGPU_GEM_CREATE_PREEMPTIBLE;
if (!offset || !*offset)
return -EINVAL;
user_addr = untagged_addr(*offset);


@@ -396,10 +396,10 @@ void amdgpu_cs_report_moved_bytes(struct amdgpu_device *adev, u64 num_bytes,
spin_unlock(&adev->mm_stats.lock);
}
static int amdgpu_cs_bo_validate(struct amdgpu_cs_parser *p,
struct amdgpu_bo *bo)
static int amdgpu_cs_bo_validate(void *param, struct amdgpu_bo *bo)
{
struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
struct amdgpu_cs_parser *p = param;
struct ttm_operation_ctx ctx = {
.interruptible = true,
.no_wait_gpu = false,
@@ -451,21 +451,6 @@ retry:
return r;
}
static int amdgpu_cs_validate(void *param, struct amdgpu_bo *bo)
{
struct amdgpu_cs_parser *p = param;
int r;
r = amdgpu_cs_bo_validate(p, bo);
if (r)
return r;
if (bo->shadow)
r = amdgpu_cs_bo_validate(p, bo->shadow);
return r;
}
static int amdgpu_cs_list_validate(struct amdgpu_cs_parser *p,
struct list_head *validated)
{
@@ -493,7 +478,7 @@ static int amdgpu_cs_list_validate(struct amdgpu_cs_parser *p,
lobj->user_pages);
}
r = amdgpu_cs_validate(p, bo);
r = amdgpu_cs_bo_validate(p, bo);
if (r)
return r;
@@ -593,7 +578,7 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p,
p->bytes_moved_vis = 0;
r = amdgpu_vm_validate_pt_bos(p->adev, &fpriv->vm,
amdgpu_cs_validate, p);
amdgpu_cs_bo_validate, p);
if (r) {
DRM_ERROR("amdgpu_vm_validate_pt_bos() failed.\n");
goto error_validate;


@@ -331,13 +331,15 @@ static int amdgpu_ctx_query(struct amdgpu_device *adev,
return 0;
}
#define AMDGPU_RAS_COUNTE_DELAY_MS 3000
static int amdgpu_ctx_query2(struct amdgpu_device *adev,
struct amdgpu_fpriv *fpriv, uint32_t id,
union drm_amdgpu_ctx_out *out)
struct amdgpu_fpriv *fpriv, uint32_t id,
union drm_amdgpu_ctx_out *out)
{
struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
struct amdgpu_ctx *ctx;
struct amdgpu_ctx_mgr *mgr;
unsigned long ras_counter;
if (!fpriv)
return -EINVAL;
@@ -362,19 +364,28 @@ static int amdgpu_ctx_query2(struct amdgpu_device *adev,
if (atomic_read(&ctx->guilty))
out->state.flags |= AMDGPU_CTX_QUERY2_FLAGS_GUILTY;
/*query ue count*/
ras_counter = amdgpu_ras_query_error_count(adev, false);
/*ras counter is monotonic increasing*/
if (ras_counter != ctx->ras_counter_ue) {
out->state.flags |= AMDGPU_CTX_QUERY2_FLAGS_RAS_UE;
ctx->ras_counter_ue = ras_counter;
}
if (adev->ras_enabled && con) {
/* Return the cached values in O(1),
* and schedule delayed work to cache
* new values.
*/
int ce_count, ue_count;
/*query ce count*/
ras_counter = amdgpu_ras_query_error_count(adev, true);
if (ras_counter != ctx->ras_counter_ce) {
out->state.flags |= AMDGPU_CTX_QUERY2_FLAGS_RAS_CE;
ctx->ras_counter_ce = ras_counter;
ce_count = atomic_read(&con->ras_ce_count);
ue_count = atomic_read(&con->ras_ue_count);
if (ce_count != ctx->ras_counter_ce) {
ctx->ras_counter_ce = ce_count;
out->state.flags |= AMDGPU_CTX_QUERY2_FLAGS_RAS_CE;
}
if (ue_count != ctx->ras_counter_ue) {
ctx->ras_counter_ue = ue_count;
out->state.flags |= AMDGPU_CTX_QUERY2_FLAGS_RAS_UE;
}
schedule_delayed_work(&con->ras_counte_delay_work,
msecs_to_jiffies(AMDGPU_RAS_COUNTE_DELAY_MS));
}
mutex_unlock(&mgr->lock);


@@ -990,7 +990,7 @@ err:
}
/**
* amdgpu_debugfs_regs_gfxoff_write - Enable/disable GFXOFF
* amdgpu_debugfs_gfxoff_write - Enable/disable GFXOFF
*
* @f: open file handle
* @buf: User buffer to write data from
@@ -1041,7 +1041,7 @@ static ssize_t amdgpu_debugfs_gfxoff_write(struct file *f, const char __user *bu
/**
* amdgpu_debugfs_regs_gfxoff_status - read gfxoff status
* amdgpu_debugfs_gfxoff_read - read gfxoff status
*
* @f: open file handle
* @buf: User buffer to store read data in


@@ -265,6 +265,21 @@ bool amdgpu_device_supports_baco(struct drm_device *dev)
return amdgpu_asic_supports_baco(adev);
}
/**
* amdgpu_device_supports_smart_shift - Is the device dGPU with
* smart shift support
*
* @dev: drm_device pointer
*
* Returns true if the device is a dGPU with Smart Shift support,
* otherwise returns false.
*/
bool amdgpu_device_supports_smart_shift(struct drm_device *dev)
{
return (amdgpu_device_supports_boco(dev) &&
amdgpu_acpi_is_power_shift_control_supported());
}
/*
* VRAM access helper functions
*/
@@ -501,7 +516,7 @@ void amdgpu_mm_wreg_mmio_rlc(struct amdgpu_device *adev,
adev->gfx.rlc.funcs &&
adev->gfx.rlc.funcs->is_rlcg_access_range) {
if (adev->gfx.rlc.funcs->is_rlcg_access_range(adev, reg))
return adev->gfx.rlc.funcs->rlcg_wreg(adev, reg, v, 0);
return adev->gfx.rlc.funcs->rlcg_wreg(adev, reg, v, 0, 0);
} else {
writel(v, ((void __iomem *)adev->rmmio) + (reg * 4));
}
@@ -3151,7 +3166,9 @@ bool amdgpu_device_asic_has_dc_support(enum amd_asic_type asic_type)
*/
bool amdgpu_device_has_dc_support(struct amdgpu_device *adev)
{
if (amdgpu_sriov_vf(adev) || adev->enable_virtual_display)
if (amdgpu_sriov_vf(adev) ||
adev->enable_virtual_display ||
(adev->harvest_ip_mask & AMD_HARVEST_IP_DMU_MASK))
return false;
return amdgpu_device_asic_has_dc_support(adev->asic_type);
@@ -3809,6 +3826,10 @@ int amdgpu_device_suspend(struct drm_device *dev, bool fbcon)
return 0;
adev->in_suspend = true;
if (amdgpu_acpi_smart_shift_update(dev, AMDGPU_SS_DEV_D3))
DRM_WARN("smart shift update failed\n");
drm_kms_helper_poll_disable(dev);
if (fbcon)
@@ -3918,6 +3939,9 @@ int amdgpu_device_resume(struct drm_device *dev, bool fbcon)
#endif
adev->in_suspend = false;
if (amdgpu_acpi_smart_shift_update(dev, AMDGPU_SS_DEV_D0))
DRM_WARN("smart shift update failed\n");
return 0;
}
@@ -4694,7 +4718,7 @@ static int amdgpu_device_suspend_display_audio(struct amdgpu_device *adev)
return 0;
}
void amdgpu_device_recheck_guilty_jobs(
static void amdgpu_device_recheck_guilty_jobs(
struct amdgpu_device *adev, struct list_head *device_list_handle,
struct amdgpu_reset_context *reset_context)
{
@@ -4997,6 +5021,8 @@ skip_hw_reset:
amdgpu_vf_error_put(tmp_adev, AMDGIM_ERROR_VF_GPU_RESET_FAIL, 0, r);
} else {
dev_info(tmp_adev->dev, "GPU reset(%d) succeeded!\n", atomic_read(&tmp_adev->gpu_reset_counter));
if (amdgpu_acpi_smart_shift_update(adev_to_drm(tmp_adev), AMDGPU_SS_DEV_D0))
DRM_WARN("smart shift update failed\n");
}
}


@@ -95,9 +95,10 @@
* - 3.39.0 - DMABUF implicit sync does a full pipeline sync
* - 3.40.0 - Add AMDGPU_IDS_FLAGS_TMZ
* - 3.41.0 - Add video codec query
* - 3.42.0 - Add 16bpc fixed point display support
*/
#define KMS_DRIVER_MAJOR 3
#define KMS_DRIVER_MINOR 41
#define KMS_DRIVER_MINOR 42
#define KMS_DRIVER_PATCHLEVEL 0
int amdgpu_vram_limit;
@@ -1821,6 +1822,7 @@ static int __init amdgpu_init(void)
DRM_INFO("amdgpu kernel modesetting enabled.\n");
amdgpu_register_atpx_handler();
amdgpu_acpi_detect();
/* Ignore KFD init failures. Normal when CONFIG_HSA_AMD is not set. */
amdgpu_amdkfd_init();


@@ -101,7 +101,8 @@ static int amdgpu_fru_read_eeprom(struct amdgpu_device *adev, uint32_t addrptr,
int amdgpu_fru_get_product_info(struct amdgpu_device *adev)
{
unsigned char buff[34];
int addrptr = 0, size = 0;
int addrptr, size;
int len;
if (!is_fru_eeprom_supported(adev))
return 0;
@@ -109,7 +110,7 @@ int amdgpu_fru_get_product_info(struct amdgpu_device *adev)
/* If algo exists, it means that the i2c_adapter's initialized */
if (!adev->pm.smu_i2c.algo) {
DRM_WARN("Cannot access FRU, EEPROM accessor not initialized");
return 0;
return -ENODEV;
}
/* There's a lot of repetition here. This is due to the FRU having
@@ -128,7 +129,7 @@ int amdgpu_fru_get_product_info(struct amdgpu_device *adev)
size = amdgpu_fru_read_eeprom(adev, addrptr, buff);
if (size < 1) {
DRM_ERROR("Failed to read FRU Manufacturer, ret:%d", size);
return size;
return -EINVAL;
}
/* Increment the addrptr by the size of the field, and 1 due to the
@@ -138,43 +139,45 @@ int amdgpu_fru_get_product_info(struct amdgpu_device *adev)
size = amdgpu_fru_read_eeprom(adev, addrptr, buff);
if (size < 1) {
DRM_ERROR("Failed to read FRU product name, ret:%d", size);
return size;
return -EINVAL;
}
len = size;
/* Product name should only be 32 characters. Any more,
* and something could be wrong. Cap it at 32 to be safe
*/
if (size > 32) {
if (len >= sizeof(adev->product_name)) {
DRM_WARN("FRU Product Number is larger than 32 characters. This is likely a mistake");
size = 32;
len = sizeof(adev->product_name) - 1;
}
/* Start at 2 due to buff using fields 0 and 1 for the address */
memcpy(adev->product_name, &buff[2], size);
adev->product_name[size] = '\0';
memcpy(adev->product_name, &buff[2], len);
adev->product_name[len] = '\0';
addrptr += size + 1;
size = amdgpu_fru_read_eeprom(adev, addrptr, buff);
if (size < 1) {
DRM_ERROR("Failed to read FRU product number, ret:%d", size);
return size;
return -EINVAL;
}
len = size;
/* Product number should only be 16 characters. Any more,
* and something could be wrong. Cap it at 16 to be safe
*/
if (size > 16) {
if (len >= sizeof(adev->product_number)) {
DRM_WARN("FRU Product Number is larger than 16 characters. This is likely a mistake");
size = 16;
len = sizeof(adev->product_number) - 1;
}
memcpy(adev->product_number, &buff[2], size);
adev->product_number[size] = '\0';
memcpy(adev->product_number, &buff[2], len);
adev->product_number[len] = '\0';
addrptr += size + 1;
size = amdgpu_fru_read_eeprom(adev, addrptr, buff);
if (size < 1) {
DRM_ERROR("Failed to read FRU product version, ret:%d", size);
return size;
return -EINVAL;
}
addrptr += size + 1;
@@ -182,18 +185,19 @@ int amdgpu_fru_get_product_info(struct amdgpu_device *adev)
if (size < 1) {
DRM_ERROR("Failed to read FRU serial number, ret:%d", size);
return size;
return -EINVAL;
}
len = size;
/* Serial number should only be 16 characters. Any more,
* and something could be wrong. Cap it at 16 to be safe
*/
if (size > 16) {
if (len >= sizeof(adev->serial)) {
DRM_WARN("FRU Serial Number is larger than 16 characters. This is likely a mistake");
size = 16;
len = sizeof(adev->serial) - 1;
}
memcpy(adev->serial, &buff[2], size);
adev->serial[size] = '\0';
memcpy(adev->serial, &buff[2], len);
adev->serial[len] = '\0';
return 0;
}


@@ -312,8 +312,6 @@ int amdgpu_gart_bind(struct amdgpu_device *adev, uint64_t offset,
int pages, struct page **pagelist, dma_addr_t *dma_addr,
uint64_t flags)
{
int r, i;
if (!adev->gart.ready) {
WARN(1, "trying to bind memory to uninitialized GART !\n");
return -EINVAL;
@@ -322,16 +320,26 @@ int amdgpu_gart_bind(struct amdgpu_device *adev, uint64_t offset,
if (!adev->gart.ptr)
return 0;
r = amdgpu_gart_map(adev, offset, pages, dma_addr, flags,
adev->gart.ptr);
if (r)
return r;
return amdgpu_gart_map(adev, offset, pages, dma_addr, flags,
adev->gart.ptr);
}
/**
* amdgpu_gart_invalidate_tlb - invalidate gart TLB
*
* @adev: amdgpu device driver pointer
*
* Invalidate gart TLB, which can be used as a way to flush gart changes
*
*/
void amdgpu_gart_invalidate_tlb(struct amdgpu_device *adev)
{
int i;
mb();
amdgpu_asic_flush_hdp(adev, NULL);
for (i = 0; i < adev->num_vmhubs; i++)
amdgpu_gmc_flush_gpu_tlb(adev, 0, i, 0);
return 0;
}
/**


@@ -66,5 +66,5 @@ int amdgpu_gart_map(struct amdgpu_device *adev, uint64_t offset,
int amdgpu_gart_bind(struct amdgpu_device *adev, uint64_t offset,
int pages, struct page **pagelist,
dma_addr_t *dma_addr, uint64_t flags);
void amdgpu_gart_invalidate_tlb(struct amdgpu_device *adev);
#endif


@@ -537,7 +537,7 @@ int amdgpu_gmc_allocate_vm_inv_eng(struct amdgpu_device *adev)
}
/**
* amdgpu_tmz_set -- check and set if a device supports TMZ
* amdgpu_gmc_tmz_set -- check and set if a device supports TMZ
* @adev: amdgpu_device pointer
*
* Check and set if an the device @adev supports Trusted Memory
@@ -583,7 +583,7 @@ void amdgpu_gmc_tmz_set(struct amdgpu_device *adev)
}
/**
* amdgpu_noretry_set -- set per asic noretry defaults
* amdgpu_gmc_noretry_set -- set per asic noretry defaults
* @adev: amdgpu_device pointer
*
* Set a per asic default for the no-retry parameter.
@@ -638,13 +638,18 @@ void amdgpu_gmc_set_vm_fault_masks(struct amdgpu_device *adev, int hub_type,
for (i = 0; i < 16; i++) {
reg = hub->vm_context0_cntl + hub->ctx_distance * i;
tmp = RREG32(reg);
tmp = (hub_type == AMDGPU_GFXHUB_0) ?
RREG32_SOC15_IP(GC, reg) :
RREG32_SOC15_IP(MMHUB, reg);
if (enable)
tmp |= hub->vm_cntx_cntl_vm_fault;
else
tmp &= ~hub->vm_cntx_cntl_vm_fault;
WREG32(reg, tmp);
(hub_type == AMDGPU_GFXHUB_0) ?
WREG32_SOC15_IP(GC, reg, tmp) :
WREG32_SOC15_IP(MMHUB, reg, tmp);
}
}


@@ -216,10 +216,12 @@ uint64_t amdgpu_gtt_mgr_usage(struct ttm_resource_manager *man)
int amdgpu_gtt_mgr_recover(struct ttm_resource_manager *man)
{
struct amdgpu_gtt_mgr *mgr = to_gtt_mgr(man);
struct amdgpu_device *adev;
struct amdgpu_gtt_node *node;
struct drm_mm_node *mm_node;
int r = 0;
adev = container_of(mgr, typeof(*adev), mman.gtt_mgr);
spin_lock(&mgr->lock);
drm_mm_for_each_node(mm_node, &mgr->mm) {
node = container_of(mm_node, struct amdgpu_gtt_node, node);
@@ -229,6 +231,8 @@ int amdgpu_gtt_mgr_recover(struct ttm_resource_manager *man)
}
spin_unlock(&mgr->lock);
amdgpu_gart_invalidate_tlb(adev);
return r;
}


@@ -130,7 +130,7 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
struct amdgpu_device *adev = ring->adev;
struct amdgpu_ib *ib = &ibs[0];
struct dma_fence *tmp = NULL;
bool skip_preamble, need_ctx_switch;
bool need_ctx_switch;
unsigned patch_offset = ~0;
struct amdgpu_vm *vm;
uint64_t fence_ctx;
@@ -227,7 +227,6 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
if (need_ctx_switch)
status |= AMDGPU_HAVE_CTX_SWITCH;
skip_preamble = ring->current_ctx == fence_ctx;
if (job && ring->funcs->emit_cntxcntl) {
status |= job->preamble_status;
status |= job->preemption_status;
@@ -245,14 +244,6 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
for (i = 0; i < num_ibs; ++i) {
ib = &ibs[i];
/* drop preamble IBs if we don't have a context switch */
if ((ib->flags & AMDGPU_IB_FLAG_PREAMBLE) &&
skip_preamble &&
!(status & AMDGPU_PREAMBLE_IB_PRESENT_FIRST) &&
!amdgpu_mcbp &&
!amdgpu_sriov_vf(adev)) /* for SRIOV preemption, Preamble CE ib must be inserted anyway */
continue;
if (job && ring->funcs->emit_frame_cntl) {
if (secure != !!(ib->flags & AMDGPU_IB_FLAGS_SECURE)) {
amdgpu_ring_emit_frame_cntl(ring, false, secure);


@@ -183,7 +183,7 @@ bool amdgpu_vmid_had_gpu_reset(struct amdgpu_device *adev,
}
/**
* amdgpu_vm_grab_idle - grab idle VMID
* amdgpu_vmid_grab_idle - grab idle VMID
*
* @vm: vm to allocate id for
* @ring: ring we want to submit job to
@@ -256,7 +256,7 @@ static int amdgpu_vmid_grab_idle(struct amdgpu_vm *vm,
}
/**
* amdgpu_vm_grab_reserved - try to assign reserved VMID
* amdgpu_vmid_grab_reserved - try to assign reserved VMID
*
* @vm: vm to allocate id for
* @ring: ring we want to submit job to
@@ -325,7 +325,7 @@ static int amdgpu_vmid_grab_reserved(struct amdgpu_vm *vm,
}
/**
* amdgpu_vm_grab_used - try to reuse a VMID
* amdgpu_vmid_grab_used - try to reuse a VMID
*
* @vm: vm to allocate id for
* @ring: ring we want to submit job to
@@ -397,7 +397,7 @@ static int amdgpu_vmid_grab_used(struct amdgpu_vm *vm,
}
/**
* amdgpu_vm_grab_id - allocate the next free VMID
* amdgpu_vmid_grab - allocate the next free VMID
*
* @vm: vm to allocate id for
* @ring: ring we want to submit job to


@@ -92,6 +92,9 @@ void amdgpu_driver_unload_kms(struct drm_device *dev)
pm_runtime_forbid(dev->dev);
}
if (amdgpu_acpi_smart_shift_update(dev, AMDGPU_SS_DRV_UNLOAD))
DRM_WARN("smart shift update failed\n");
amdgpu_acpi_fini(adev);
amdgpu_device_fini_hw(adev);
}
@@ -215,6 +218,9 @@ int amdgpu_driver_load_kms(struct amdgpu_device *adev, unsigned long flags)
pm_runtime_put_autosuspend(dev->dev);
}
if (amdgpu_acpi_smart_shift_update(dev, AMDGPU_SS_DRV_LOAD))
DRM_WARN("smart shift update failed\n");
out:
if (r) {
/* balance pm_runtime_get_sync in amdgpu_driver_unload_kms */
@@ -862,6 +868,21 @@ int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
min((size_t)size, (size_t)(bios_size - bios_offset)))
? -EFAULT : 0;
}
case AMDGPU_INFO_VBIOS_INFO: {
struct drm_amdgpu_info_vbios vbios_info = {};
struct atom_context *atom_context;
atom_context = adev->mode_info.atom_context;
memcpy(vbios_info.name, atom_context->name, sizeof(atom_context->name));
memcpy(vbios_info.vbios_pn, atom_context->vbios_pn, sizeof(atom_context->vbios_pn));
vbios_info.version = atom_context->version;
memcpy(vbios_info.vbios_ver_str, atom_context->vbios_ver_str,
sizeof(atom_context->vbios_ver_str));
memcpy(vbios_info.date, atom_context->date, sizeof(atom_context->date));
return copy_to_user(out, &vbios_info,
min((size_t)size, sizeof(vbios_info))) ? -EFAULT : 0;
}
default:
DRM_DEBUG_KMS("Invalid request %d\n",
info->vbios_info.type);


@@ -71,7 +71,7 @@ static void amdgpu_bo_destroy(struct ttm_buffer_object *tbo)
}
amdgpu_bo_unref(&bo->parent);
if (bo->tbo.type == ttm_bo_type_device) {
if (bo->tbo.type != ttm_bo_type_kernel) {
ubo = to_amdgpu_bo_user(bo);
kfree(ubo->metadata);
}
@@ -133,7 +133,9 @@ void amdgpu_bo_placement_from_domain(struct amdgpu_bo *abo, u32 domain)
if (domain & AMDGPU_GEM_DOMAIN_GTT) {
places[c].fpfn = 0;
places[c].lpfn = 0;
places[c].mem_type = TTM_PL_TT;
places[c].mem_type =
abo->flags & AMDGPU_GEM_CREATE_PREEMPTIBLE ?
AMDGPU_PL_PREEMPT : TTM_PL_TT;
places[c].flags = 0;
c++;
}
@@ -612,35 +614,6 @@ fail_unreserve:
return r;
}
int amdgpu_bo_create_shadow(struct amdgpu_device *adev,
unsigned long size,
struct amdgpu_bo *bo)
{
struct amdgpu_bo_param bp;
int r;
if (bo->shadow)
return 0;
memset(&bp, 0, sizeof(bp));
bp.size = size;
bp.domain = AMDGPU_GEM_DOMAIN_GTT;
bp.flags = AMDGPU_GEM_CREATE_CPU_GTT_USWC;
bp.type = ttm_bo_type_kernel;
bp.resv = bo->tbo.base.resv;
bp.bo_ptr_size = sizeof(struct amdgpu_bo);
r = amdgpu_bo_create(adev, &bp, &bo->shadow);
if (!r) {
bo->shadow->parent = amdgpu_bo_ref(bo);
mutex_lock(&adev->shadow_list_lock);
list_add_tail(&bo->shadow->shadow_list, &adev->shadow_list);
mutex_unlock(&adev->shadow_list_lock);
}
return r;
}
/**
* amdgpu_bo_create_user - create an &amdgpu_bo_user buffer object
* @adev: amdgpu device object
@@ -668,6 +641,38 @@ int amdgpu_bo_create_user(struct amdgpu_device *adev,
*ubo_ptr = to_amdgpu_bo_user(bo_ptr);
return r;
}
/**
* amdgpu_bo_create_vm - create an &amdgpu_bo_vm buffer object
* @adev: amdgpu device object
* @bp: parameters to be used for the buffer object
* @vmbo_ptr: pointer to the buffer object pointer
*
* Create a BO to be used by GPUVM.
*
* Returns:
* 0 for success or a negative error code on failure.
*/
int amdgpu_bo_create_vm(struct amdgpu_device *adev,
struct amdgpu_bo_param *bp,
struct amdgpu_bo_vm **vmbo_ptr)
{
struct amdgpu_bo *bo_ptr;
int r;
/* bo_ptr_size will be determined by the caller and it depends on
* num of amdgpu_vm_pt entries.
*/
BUG_ON(bp->bo_ptr_size < sizeof(struct amdgpu_bo_vm));
r = amdgpu_bo_create(adev, bp, &bo_ptr);
if (r)
return r;
*vmbo_ptr = to_amdgpu_bo_vm(bo_ptr);
return r;
}
/**
* amdgpu_bo_validate - validate an &amdgpu_bo buffer object
* @bo: pointer to the buffer object
@@ -702,6 +707,22 @@ retry:
return r;
}
/**
* amdgpu_bo_add_to_shadow_list - add a BO to the shadow list
*
* @bo: BO that will be inserted into the shadow list
*
* Insert a BO to the shadow list.
*/
void amdgpu_bo_add_to_shadow_list(struct amdgpu_bo *bo)
{
struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
mutex_lock(&adev->shadow_list_lock);
list_add_tail(&bo->shadow_list, &adev->shadow_list);
mutex_unlock(&adev->shadow_list_lock);
}
/**
* amdgpu_bo_restore_shadow - restore an &amdgpu_bo shadow
*
@@ -1191,6 +1212,9 @@ int amdgpu_bo_get_metadata(struct amdgpu_bo *bo, void *buffer,
BUG_ON(bo->tbo.type == ttm_bo_type_kernel);
ubo = to_amdgpu_bo_user(bo);
if (metadata_size)
*metadata_size = ubo->metadata_size;
if (buffer) {
if (buffer_size < ubo->metadata_size)
return -EINVAL;
@@ -1199,8 +1223,6 @@ int amdgpu_bo_get_metadata(struct amdgpu_bo *bo, void *buffer,
memcpy(buffer, ubo->metadata, ubo->metadata_size);
}
if (metadata_size)
*metadata_size = ubo->metadata_size;
if (flags)
*flags = ubo->metadata_flags;


@@ -44,6 +44,7 @@
#define AMDGPU_AMDKFD_CREATE_SVM_BO (1ULL << 62)
#define to_amdgpu_bo_user(abo) container_of((abo), struct amdgpu_bo_user, bo)
#define to_amdgpu_bo_vm(abo) container_of((abo), struct amdgpu_bo_vm, bo)
struct amdgpu_bo_param {
unsigned long size;
@@ -103,9 +104,6 @@ struct amdgpu_bo {
struct amdgpu_vm_bo_base *vm_bo;
/* Constant after initialization */
struct amdgpu_bo *parent;
struct amdgpu_bo *shadow;
#ifdef CONFIG_MMU_NOTIFIER
struct mmu_interval_notifier notifier;
@@ -125,6 +123,12 @@ struct amdgpu_bo_user {
};
struct amdgpu_bo_vm {
struct amdgpu_bo bo;
struct amdgpu_bo *shadow;
struct amdgpu_vm_pt entries[];
};
static inline struct amdgpu_bo *ttm_to_amdgpu_bo(struct ttm_buffer_object *tbo)
{
return container_of(tbo, struct amdgpu_bo, tbo);
@@ -252,6 +256,22 @@ static inline bool amdgpu_bo_encrypted(struct amdgpu_bo *bo)
return bo->flags & AMDGPU_GEM_CREATE_ENCRYPTED;
}
/**
* amdgpu_bo_shadowed - check if the BO is shadowed
*
* @bo: BO to be tested.
*
* Returns:
* NULL if not shadowed or else return a BO pointer.
*/
static inline struct amdgpu_bo *amdgpu_bo_shadowed(struct amdgpu_bo *bo)
{
if (bo->tbo.type == ttm_bo_type_kernel)
return to_amdgpu_bo_vm(bo)->shadow;
return NULL;
}
bool amdgpu_bo_is_amdgpu_bo(struct ttm_buffer_object *bo);
void amdgpu_bo_placement_from_domain(struct amdgpu_bo *abo, u32 domain);
@@ -272,11 +292,11 @@ int amdgpu_bo_create_kernel_at(struct amdgpu_device *adev,
int amdgpu_bo_create_user(struct amdgpu_device *adev,
struct amdgpu_bo_param *bp,
struct amdgpu_bo_user **ubo_ptr);
int amdgpu_bo_create_vm(struct amdgpu_device *adev,
struct amdgpu_bo_param *bp,
struct amdgpu_bo_vm **ubo_ptr);
void amdgpu_bo_free_kernel(struct amdgpu_bo **bo, u64 *gpu_addr,
void **cpu_addr);
int amdgpu_bo_create_shadow(struct amdgpu_device *adev,
unsigned long size,
struct amdgpu_bo *bo);
int amdgpu_bo_kmap(struct amdgpu_bo *bo, void **ptr);
void *amdgpu_bo_kptr(struct amdgpu_bo *bo);
void amdgpu_bo_kunmap(struct amdgpu_bo *bo);
@@ -312,6 +332,7 @@ u64 amdgpu_bo_gpu_offset_no_check(struct amdgpu_bo *bo);
int amdgpu_bo_validate(struct amdgpu_bo *bo);
void amdgpu_bo_get_memory(struct amdgpu_bo *bo, uint64_t *vram_mem,
uint64_t *gtt_mem, uint64_t *cpu_mem);
void amdgpu_bo_add_to_shadow_list(struct amdgpu_bo *bo);
int amdgpu_bo_restore_shadow(struct amdgpu_bo *shadow,
struct dma_fence **fence);
uint32_t amdgpu_bo_get_preferred_pin_domain(struct amdgpu_device *adev,


@@ -0,0 +1,190 @@
// SPDX-License-Identifier: GPL-2.0 OR MIT
/*
* Copyright 2016-2021 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: Christian König, Felix Kuehling
*/
#include "amdgpu.h"
static inline struct amdgpu_preempt_mgr *
to_preempt_mgr(struct ttm_resource_manager *man)
{
return container_of(man, struct amdgpu_preempt_mgr, manager);
}
/**
* DOC: mem_info_preempt_used
*
* The amdgpu driver provides a sysfs API for reporting current total amount of
* used preemptible memory.
* The file mem_info_preempt_used is used for this, and returns the current
* used size of the preemptible block, in bytes
*/
static ssize_t mem_info_preempt_used_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
struct drm_device *ddev = dev_get_drvdata(dev);
struct amdgpu_device *adev = drm_to_adev(ddev);
struct ttm_resource_manager *man;
man = ttm_manager_type(&adev->mman.bdev, AMDGPU_PL_PREEMPT);
return sysfs_emit(buf, "%llu\n", amdgpu_preempt_mgr_usage(man));
}
static DEVICE_ATTR_RO(mem_info_preempt_used);
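
A hedged userspace sketch of consuming the sysfs file described in the
DOC block above. The path is an assumption, modeled on the device
directory where amdgpu exposes its other mem_info_* counters.

    #include <stdio.h>

    int main(void)
    {
            unsigned long long used;
            /* Assumed path; the card index varies per system. */
            FILE *f = fopen("/sys/class/drm/card0/device/mem_info_preempt_used", "r");

            if (!f)
                    return 1;
            if (fscanf(f, "%llu", &used) == 1)
                    printf("preempt used: %llu bytes\n", used);
            fclose(f);
            return 0;
    }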
/**
* amdgpu_preempt_mgr_new - allocate a new node
*
* @man: TTM memory type manager
* @tbo: TTM BO we need this range for
* @place: placement flags and restrictions
* @mem: the resulting mem object
*
* Dummy, just count the space used without allocating resources or any limit.
*/
static int amdgpu_preempt_mgr_new(struct ttm_resource_manager *man,
struct ttm_buffer_object *tbo,
const struct ttm_place *place,
struct ttm_resource *mem)
{
struct amdgpu_preempt_mgr *mgr = to_preempt_mgr(man);
atomic64_add(mem->num_pages, &mgr->used);
mem->mm_node = NULL;
mem->start = AMDGPU_BO_INVALID_OFFSET;
return 0;
}
/**
* amdgpu_preempt_mgr_del - free ranges
*
* @man: TTM memory type manager
* @mem: TTM memory object
*
* Free the allocated GTT again.
*/
static void amdgpu_preempt_mgr_del(struct ttm_resource_manager *man,
struct ttm_resource *mem)
{
struct amdgpu_preempt_mgr *mgr = to_preempt_mgr(man);
atomic64_sub(mem->num_pages, &mgr->used);
}
/**
* amdgpu_preempt_mgr_usage - return usage of PREEMPT domain
*
* @man: TTM memory type manager
*
* Return how many bytes are used in the GTT domain
*/
uint64_t amdgpu_preempt_mgr_usage(struct ttm_resource_manager *man)
{
struct amdgpu_preempt_mgr *mgr = to_preempt_mgr(man);
s64 result = atomic64_read(&mgr->used);
return (result > 0 ? result : 0) * PAGE_SIZE;
}
/**
* amdgpu_preempt_mgr_debug - dump the preempt table
*
* @man: TTM memory type manager
* @printer: DRM printer to use
*
* Dump the table content using printk.
*/
static void amdgpu_preempt_mgr_debug(struct ttm_resource_manager *man,
struct drm_printer *printer)
{
struct amdgpu_preempt_mgr *mgr = to_preempt_mgr(man);
drm_printf(printer, "man size:%llu pages, preempt used:%lld pages\n",
man->size, (u64)atomic64_read(&mgr->used));
}
static const struct ttm_resource_manager_func amdgpu_preempt_mgr_func = {
.alloc = amdgpu_preempt_mgr_new,
.free = amdgpu_preempt_mgr_del,
.debug = amdgpu_preempt_mgr_debug
};
/**
* amdgpu_preempt_mgr_init - init PREEMPT manager and DRM MM
*
* @adev: amdgpu_device pointer
*
* Allocate and initialize the GTT manager.
*/
int amdgpu_preempt_mgr_init(struct amdgpu_device *adev)
{
struct amdgpu_preempt_mgr *mgr = &adev->mman.preempt_mgr;
struct ttm_resource_manager *man = &mgr->manager;
int ret;
man->use_tt = true;
man->func = &amdgpu_preempt_mgr_func;
ttm_resource_manager_init(man, (1 << 30));
atomic64_set(&mgr->used, 0);
ret = device_create_file(adev->dev, &dev_attr_mem_info_preempt_used);
if (ret) {
DRM_ERROR("Failed to create device file mem_info_preempt_used\n");
return ret;
}
ttm_set_driver_manager(&adev->mman.bdev, AMDGPU_PL_PREEMPT,
&mgr->manager);
ttm_resource_manager_set_used(man, true);
return 0;
}
/**
* amdgpu_preempt_mgr_fini - free and destroy GTT manager
*
* @adev: amdgpu_device pointer
*
* Destroy and free the GTT manager, returns -EBUSY if ranges are still
* allocated inside it.
*/
void amdgpu_preempt_mgr_fini(struct amdgpu_device *adev)
{
struct amdgpu_preempt_mgr *mgr = &adev->mman.preempt_mgr;
struct ttm_resource_manager *man = &mgr->manager;
int ret;
ttm_resource_manager_set_used(man, false);
ret = ttm_resource_manager_evict_all(&adev->mman.bdev, man);
if (ret)
return;
device_remove_file(adev->dev, &dev_attr_mem_info_preempt_used);
ttm_resource_manager_cleanup(man);
ttm_set_driver_manager(&adev->mman.bdev, AMDGPU_PL_PREEMPT, NULL);
}


@@ -76,6 +76,7 @@ struct psp_ring
uint64_t ring_mem_mc_addr;
void *ring_mem_handle;
uint32_t ring_size;
uint32_t ring_wptr;
};
/* More registers may will be supported */


@@ -27,6 +27,7 @@
#include <linux/uaccess.h>
#include <linux/reboot.h>
#include <linux/syscalls.h>
#include <linux/pm_runtime.h>
#include "amdgpu.h"
#include "amdgpu_ras.h"
@@ -1043,29 +1044,36 @@ int amdgpu_ras_error_inject(struct amdgpu_device *adev,
}
/* get the total error counts on all IPs */
unsigned long amdgpu_ras_query_error_count(struct amdgpu_device *adev,
bool is_ce)
void amdgpu_ras_query_error_count(struct amdgpu_device *adev,
unsigned long *ce_count,
unsigned long *ue_count)
{
struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
struct ras_manager *obj;
struct ras_err_data data = {0, 0};
unsigned long ce, ue;
if (!adev->ras_enabled || !con)
return 0;
return;
ce = 0;
ue = 0;
list_for_each_entry(obj, &con->head, node) {
struct ras_query_if info = {
.head = obj->head,
};
if (amdgpu_ras_query_error_status(adev, &info))
return 0;
return;
data.ce_count += info.ce_count;
data.ue_count += info.ue_count;
ce += info.ce_count;
ue += info.ue_count;
}
return is_ce ? data.ce_count : data.ue_count;
if (ce_count)
*ce_count = ce;
if (ue_count)
*ue_count = ue;
}
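With the reworked signature either count can be requested on its own by passing NULL for the other pointer. A hedged caller sketch (kernel-style; adev is assumed to be a valid amdgpu_device, and this exact call site is not part of the patch):
	unsigned long ce_count, ue_count;

	/* Fetch both counts at once... */
	amdgpu_ras_query_error_count(adev, &ce_count, &ue_count);
	dev_info(adev->dev, "RAS: %lu correctable, %lu uncorrectable\n",
		 ce_count, ue_count);

	/* ...or only the uncorrectable count. */
	amdgpu_ras_query_error_count(adev, NULL, &ue_count);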
/* query/inject/cure end */
@@ -2109,6 +2117,30 @@ static void amdgpu_ras_check_supported(struct amdgpu_device *adev)
adev->ras_hw_enabled & amdgpu_ras_mask;
}
static void amdgpu_ras_counte_dw(struct work_struct *work)
{
struct amdgpu_ras *con = container_of(work, struct amdgpu_ras,
ras_counte_delay_work.work);
struct amdgpu_device *adev = con->adev;
struct drm_device *dev = &adev->ddev;
unsigned long ce_count, ue_count;
int res;
res = pm_runtime_get_sync(dev->dev);
if (res < 0)
goto Out;
/* Cache new values.
*/
amdgpu_ras_query_error_count(adev, &ce_count, &ue_count);
atomic_set(&con->ras_ce_count, ce_count);
atomic_set(&con->ras_ue_count, ue_count);
pm_runtime_mark_last_busy(dev->dev);
Out:
pm_runtime_put_autosuspend(dev->dev);
}
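The worker brackets the per-IP queries with a runtime-PM reference so a runtime-suspended GPU is resumed before its counters are read, then lets it autosuspend again. How the work gets kicked is not visible in this hunk; a hedged sketch of a typical trigger, with the zero delay being an assumption:
	/* Refresh the cached RAS counters without blocking the caller. */
	schedule_delayed_work(&con->ras_counte_delay_work, 0);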
int amdgpu_ras_init(struct amdgpu_device *adev)
{
struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
@@ -2123,6 +2155,11 @@ int amdgpu_ras_init(struct amdgpu_device *adev)
if (!con)
return -ENOMEM;
con->adev = adev;
INIT_DELAYED_WORK(&con->ras_counte_delay_work, amdgpu_ras_counte_dw);
atomic_set(&con->ras_ce_count, 0);
atomic_set(&con->ras_ue_count, 0);
con->objs = (struct ras_manager *)(con + 1);
amdgpu_ras_set_context(adev, con);
@@ -2226,6 +2263,8 @@ int amdgpu_ras_late_init(struct amdgpu_device *adev,
struct ras_fs_if *fs_info,
struct ras_ih_if *ih_info)
{
struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
unsigned long ue_count, ce_count;
int r;
/* disable RAS feature per IP block if it is not supported */
@@ -2266,6 +2305,12 @@ int amdgpu_ras_late_init(struct amdgpu_device *adev,
if (r)
goto sysfs;
/* Those are the cached values at init.
*/
amdgpu_ras_query_error_count(adev, &ce_count, &ue_count);
atomic_set(&con->ras_ce_count, ce_count);
atomic_set(&con->ras_ue_count, ue_count);
return 0;
cleanup:
amdgpu_ras_sysfs_remove(adev, ras_block);
@@ -2384,6 +2429,8 @@ int amdgpu_ras_fini(struct amdgpu_device *adev)
if (con->features)
amdgpu_ras_disable_all_features(adev, 1);
cancel_delayed_work_sync(&con->ras_counte_delay_work);
amdgpu_ras_set_context(adev, NULL);
kfree(con);

View File

@@ -340,6 +340,11 @@ struct amdgpu_ras {
/* disable ras error count harvest in recovery */
bool disable_ras_err_cnt_harvest;
/* RAS count errors delayed work */
struct delayed_work ras_counte_delay_work;
atomic_t ras_ue_count;
atomic_t ras_ce_count;
};
struct ras_fs_data {
@@ -485,8 +490,9 @@ int amdgpu_ras_request_reset_on_boot(struct amdgpu_device *adev,
void amdgpu_ras_resume(struct amdgpu_device *adev);
void amdgpu_ras_suspend(struct amdgpu_device *adev);
unsigned long amdgpu_ras_query_error_count(struct amdgpu_device *adev,
bool is_ce);
void amdgpu_ras_query_error_count(struct amdgpu_device *adev,
unsigned long *ce_count,
unsigned long *ue_count);
/* error handling functions */
int amdgpu_ras_add_bad_pages(struct amdgpu_device *adev,

View File

@@ -127,8 +127,8 @@ struct amdgpu_rlc_funcs {
void (*reset)(struct amdgpu_device *adev);
void (*start)(struct amdgpu_device *adev);
void (*update_spm_vmid)(struct amdgpu_device *adev, unsigned vmid);
void (*rlcg_wreg)(struct amdgpu_device *adev, u32 offset, u32 v, u32 flag);
u32 (*rlcg_rreg)(struct amdgpu_device *adev, u32 offset, u32 flag);
void (*rlcg_wreg)(struct amdgpu_device *adev, u32 offset, u32 v, u32 acc_flags, u32 hwip);
u32 (*rlcg_rreg)(struct amdgpu_device *adev, u32 offset, u32 acc_flags, u32 hwip);
bool (*is_rlcg_access_range)(struct amdgpu_device *adev, uint32_t reg);
};
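Threading acc_flags and hwip through the callbacks lets a single RLCG hook serve both the GC and MMHUB register files. An illustrative wrapper (this helper is invented; only the callback signature comes from the patch):
static inline void demo_rlcg_wreg_gc(struct amdgpu_device *adev,
				     u32 offset, u32 value)
{
	/* Dispatch a GC register write through the RLCG path. */
	adev->gfx.rlc.funcs->rlcg_wreg(adev, offset, value,
				       AMDGPU_REGS_RLC, GC_HWIP);
}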

View File

@@ -158,6 +158,7 @@ static void amdgpu_evict_flags(struct ttm_buffer_object *bo,
}
break;
case TTM_PL_TT:
case AMDGPU_PL_PREEMPT:
default:
amdgpu_bo_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_CPU);
break;
@@ -198,6 +199,7 @@ static int amdgpu_ttm_map_buffer(struct ttm_buffer_object *bo,
BUG_ON(adev->mman.buffer_funcs->copy_max_bytes <
AMDGPU_GTT_MAX_TRANSFER_SIZE * 8);
BUG_ON(mem->mem_type == AMDGPU_PL_PREEMPT);
/* Map only what can't be accessed directly */
if (!tmz && mem->start != AMDGPU_BO_INVALID_OFFSET) {
@@ -461,7 +463,8 @@ static int amdgpu_bo_move(struct ttm_buffer_object *bo, bool evict,
struct ttm_resource *old_mem = &bo->mem;
int r;
if (new_mem->mem_type == TTM_PL_TT) {
if (new_mem->mem_type == TTM_PL_TT ||
new_mem->mem_type == AMDGPU_PL_PREEMPT) {
r = amdgpu_ttm_backend_bind(bo->bdev, bo->ttm, new_mem);
if (r)
return r;
@@ -479,11 +482,13 @@ static int amdgpu_bo_move(struct ttm_buffer_object *bo, bool evict,
goto out;
}
if (old_mem->mem_type == TTM_PL_SYSTEM &&
new_mem->mem_type == TTM_PL_TT) {
(new_mem->mem_type == TTM_PL_TT ||
new_mem->mem_type == AMDGPU_PL_PREEMPT)) {
ttm_bo_move_null(bo, new_mem);
goto out;
}
if (old_mem->mem_type == TTM_PL_TT &&
if ((old_mem->mem_type == TTM_PL_TT ||
old_mem->mem_type == AMDGPU_PL_PREEMPT) &&
new_mem->mem_type == TTM_PL_SYSTEM) {
r = ttm_bo_wait_ctx(bo, ctx);
if (r)
@@ -568,6 +573,7 @@ static int amdgpu_ttm_io_mem_reserve(struct ttm_device *bdev,
/* system memory */
return 0;
case TTM_PL_TT:
case AMDGPU_PL_PREEMPT:
break;
case TTM_PL_VRAM:
mem->bus.offset = mem->start << PAGE_SHIFT;
@@ -987,6 +993,7 @@ int amdgpu_ttm_alloc_gart(struct ttm_buffer_object *bo)
return r;
}
amdgpu_gart_invalidate_tlb(adev);
ttm_resource_free(bo, &bo->mem);
bo->mem = tmp;
}
@@ -1273,7 +1280,8 @@ uint64_t amdgpu_ttm_tt_pde_flags(struct ttm_tt *ttm, struct ttm_resource *mem)
if (mem && mem->mem_type != TTM_PL_SYSTEM)
flags |= AMDGPU_PTE_VALID;
if (mem && mem->mem_type == TTM_PL_TT) {
if (mem && (mem->mem_type == TTM_PL_TT ||
mem->mem_type == AMDGPU_PL_PREEMPT)) {
flags |= AMDGPU_PTE_SYSTEM;
if (ttm->caching == ttm_cached)
@@ -1347,6 +1355,15 @@ static bool amdgpu_ttm_bo_eviction_valuable(struct ttm_buffer_object *bo,
}
switch (bo->mem.mem_type) {
case AMDGPU_PL_PREEMPT:
/* Preemptible BOs don't own system resources managed by the
* driver (pages, VRAM, GART space). They point to resources
* owned by someone else (e.g. pageable memory in user mode
* or a DMABuf). They are used in a preemptible context so we
* can guarantee no deadlocks and good QoS in case of MMU
* notifiers or DMABuf move notifiers from the resource owner.
*/
return false;
case TTM_PL_TT:
if (amdgpu_bo_is_amdgpu_bo(bo) &&
amdgpu_bo_encrypted(ttm_to_amdgpu_bo(bo)))
@@ -1727,6 +1744,13 @@ int amdgpu_ttm_init(struct amdgpu_device *adev)
DRM_INFO("amdgpu: %uM of GTT memory ready.\n",
(unsigned)(gtt_size / (1024 * 1024)));
/* Initialize preemptible memory pool */
r = amdgpu_preempt_mgr_init(adev);
if (r) {
DRM_ERROR("Failed initializing PREEMPT heap.\n");
return r;
}
/* Initialize various on-chip memory pools */
r = amdgpu_ttm_init_on_chip(adev, AMDGPU_PL_GDS, adev->gds.gds_size);
if (r) {
@@ -1767,6 +1791,7 @@ void amdgpu_ttm_fini(struct amdgpu_device *adev)
amdgpu_vram_mgr_fini(adev);
amdgpu_gtt_mgr_fini(adev);
amdgpu_preempt_mgr_fini(adev);
ttm_range_man_fini(&adev->mman.bdev, AMDGPU_PL_GDS);
ttm_range_man_fini(&adev->mman.bdev, AMDGPU_PL_GWS);
ttm_range_man_fini(&adev->mman.bdev, AMDGPU_PL_OA);
@@ -1917,6 +1942,11 @@ int amdgpu_fill_buffer(struct amdgpu_bo *bo,
return -EINVAL;
}
if (bo->tbo.mem.mem_type == AMDGPU_PL_PREEMPT) {
DRM_ERROR("Trying to clear preemptible memory.\n");
return -EINVAL;
}
if (bo->tbo.mem.mem_type == TTM_PL_TT) {
r = amdgpu_ttm_alloc_gart(&bo->tbo);
if (r)

View File

@@ -31,6 +31,7 @@
#define AMDGPU_PL_GDS (TTM_PL_PRIV + 0)
#define AMDGPU_PL_GWS (TTM_PL_PRIV + 1)
#define AMDGPU_PL_OA (TTM_PL_PRIV + 2)
#define AMDGPU_PL_PREEMPT (TTM_PL_PRIV + 3)
#define AMDGPU_GTT_MAX_TRANSFER_SIZE 512
#define AMDGPU_GTT_NUM_TRANSFER_WINDOWS 2
@@ -54,6 +55,11 @@ struct amdgpu_gtt_mgr {
atomic64_t available;
};
struct amdgpu_preempt_mgr {
struct ttm_resource_manager manager;
atomic64_t used;
};
struct amdgpu_mman {
struct ttm_device bdev;
bool initialized;
@@ -70,6 +76,7 @@ struct amdgpu_mman {
struct amdgpu_vram_mgr vram_mgr;
struct amdgpu_gtt_mgr gtt_mgr;
struct amdgpu_preempt_mgr preempt_mgr;
uint64_t stolen_vga_size;
struct amdgpu_bo *stolen_vga_memory;
@@ -97,6 +104,8 @@ struct amdgpu_copy_mem {
int amdgpu_gtt_mgr_init(struct amdgpu_device *adev, uint64_t gtt_size);
void amdgpu_gtt_mgr_fini(struct amdgpu_device *adev);
int amdgpu_preempt_mgr_init(struct amdgpu_device *adev);
void amdgpu_preempt_mgr_fini(struct amdgpu_device *adev);
int amdgpu_vram_mgr_init(struct amdgpu_device *adev);
void amdgpu_vram_mgr_fini(struct amdgpu_device *adev);
@@ -104,6 +113,8 @@ bool amdgpu_gtt_mgr_has_gart_addr(struct ttm_resource *mem);
uint64_t amdgpu_gtt_mgr_usage(struct ttm_resource_manager *man);
int amdgpu_gtt_mgr_recover(struct ttm_resource_manager *man);
uint64_t amdgpu_preempt_mgr_usage(struct ttm_resource_manager *man);
u64 amdgpu_vram_mgr_bo_visible_size(struct amdgpu_bo *bo);
int amdgpu_vram_mgr_alloc_sgt(struct amdgpu_device *adev,
struct ttm_resource *mem,

View File

@@ -840,9 +840,8 @@ static int amdgpu_uvd_cs_msg(struct amdgpu_uvd_cs_ctx *ctx,
default:
DRM_ERROR("Illegal UVD message type (%d)!\n", msg_type);
return -EINVAL;
}
BUG();
return -EINVAL;
}

View File

@@ -88,7 +88,7 @@ static int amdgpu_vce_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
bool direct, struct dma_fence **fence);
/**
* amdgpu_vce_init - allocate memory, load vce firmware
* amdgpu_vce_sw_init - allocate memory, load vce firmware
*
* @adev: amdgpu_device pointer
* @size: size for the new BO
@@ -205,7 +205,7 @@ int amdgpu_vce_sw_init(struct amdgpu_device *adev, unsigned long size)
}
/**
* amdgpu_vce_fini - free memory
* amdgpu_vce_sw_fini - free memory
*
* @adev: amdgpu_device pointer
*
@@ -579,7 +579,7 @@ err:
}
/**
* amdgpu_vce_cs_validate_bo - make sure not to cross 4GB boundary
* amdgpu_vce_validate_bo - make sure not to cross 4GB boundary
*
* @p: parser context
* @ib_idx: indirect buffer to use
@@ -720,7 +720,7 @@ static int amdgpu_vce_validate_handle(struct amdgpu_cs_parser *p,
}
/**
* amdgpu_vce_cs_parse - parse and validate the command stream
* amdgpu_vce_ring_parse_cs - parse and validate the command stream
*
* @p: parser context
* @ib_idx: indirect buffer to use
@@ -956,7 +956,7 @@ out:
}
/**
* amdgpu_vce_cs_parse_vm - parse the command stream in VM mode
* amdgpu_vce_ring_parse_cs_vm - parse the command stream in VM mode
*
* @p: parser context
* @ib_idx: indirect buffer to use

View File

@@ -653,15 +653,15 @@ void amdgpu_vm_move_to_lru_tail(struct amdgpu_device *adev,
spin_lock(&adev->mman.bdev.lru_lock);
list_for_each_entry(bo_base, &vm->idle, vm_status) {
struct amdgpu_bo *bo = bo_base->bo;
struct amdgpu_bo *shadow = amdgpu_bo_shadowed(bo);
if (!bo->parent)
continue;
ttm_bo_move_to_lru_tail(&bo->tbo, &bo->tbo.mem,
&vm->lru_bulk_move);
if (bo->shadow)
ttm_bo_move_to_lru_tail(&bo->shadow->tbo,
&bo->shadow->tbo.mem,
if (shadow)
ttm_bo_move_to_lru_tail(&shadow->tbo, &shadow->tbo.mem,
&vm->lru_bulk_move);
}
spin_unlock(&adev->mman.bdev.lru_lock);
@@ -693,15 +693,21 @@ int amdgpu_vm_validate_pt_bos(struct amdgpu_device *adev, struct amdgpu_vm *vm,
list_for_each_entry_safe(bo_base, tmp, &vm->evicted, vm_status) {
struct amdgpu_bo *bo = bo_base->bo;
struct amdgpu_bo *shadow = amdgpu_bo_shadowed(bo);
r = validate(param, bo);
if (r)
return r;
if (shadow) {
r = validate(param, shadow);
if (r)
return r;
}
if (bo->tbo.type != ttm_bo_type_kernel) {
amdgpu_vm_bo_moved(bo_base);
} else {
vm->update_funcs->map_table(bo);
vm->update_funcs->map_table(to_amdgpu_bo_vm(bo));
amdgpu_vm_bo_relocated(bo_base);
}
}
@@ -733,7 +739,7 @@ bool amdgpu_vm_ready(struct amdgpu_vm *vm)
*
* @adev: amdgpu_device pointer
* @vm: VM to clear BO from
* @bo: BO to clear
* @vmbo: BO to clear
* @immediate: use an immediate update
*
* Root PD needs to be reserved when calling this.
@@ -743,13 +749,14 @@ bool amdgpu_vm_ready(struct amdgpu_vm *vm)
*/
static int amdgpu_vm_clear_bo(struct amdgpu_device *adev,
struct amdgpu_vm *vm,
struct amdgpu_bo *bo,
struct amdgpu_bo_vm *vmbo,
bool immediate)
{
struct ttm_operation_ctx ctx = { true, false };
unsigned level = adev->vm_manager.root_level;
struct amdgpu_vm_update_params params;
struct amdgpu_bo *ancestor = bo;
struct amdgpu_bo *ancestor = &vmbo->bo;
struct amdgpu_bo *bo = &vmbo->bo;
unsigned entries, ats_entries;
uint64_t addr;
int r;
@@ -789,14 +796,15 @@ static int amdgpu_vm_clear_bo(struct amdgpu_device *adev,
if (r)
return r;
if (bo->shadow) {
r = ttm_bo_validate(&bo->shadow->tbo, &bo->shadow->placement,
&ctx);
if (vmbo->shadow) {
struct amdgpu_bo *shadow = vmbo->shadow;
r = ttm_bo_validate(&shadow->tbo, &shadow->placement, &ctx);
if (r)
return r;
}
r = vm->update_funcs->map_table(bo);
r = vm->update_funcs->map_table(vmbo);
if (r)
return r;
@@ -820,7 +828,7 @@ static int amdgpu_vm_clear_bo(struct amdgpu_device *adev,
amdgpu_gmc_get_vm_pde(adev, level, &value, &flags);
}
r = vm->update_funcs->update(&params, bo, addr, 0, ats_entries,
r = vm->update_funcs->update(&params, vmbo, addr, 0, ats_entries,
value, flags);
if (r)
return r;
@@ -843,7 +851,7 @@ static int amdgpu_vm_clear_bo(struct amdgpu_device *adev,
}
}
r = vm->update_funcs->update(&params, bo, addr, 0, entries,
r = vm->update_funcs->update(&params, vmbo, addr, 0, entries,
value, flags);
if (r)
return r;
@@ -859,14 +867,17 @@ static int amdgpu_vm_clear_bo(struct amdgpu_device *adev,
* @vm: requesting vm
* @level: the page table level
 * @immediate: use an immediate update
* @bo: pointer to the buffer object pointer
* @vmbo: pointer to the buffer object pointer
*/
static int amdgpu_vm_pt_create(struct amdgpu_device *adev,
struct amdgpu_vm *vm,
int level, bool immediate,
struct amdgpu_bo **bo)
struct amdgpu_bo_vm **vmbo)
{
struct amdgpu_bo_param bp;
struct amdgpu_bo *bo;
struct dma_resv *resv;
unsigned int num_entries;
int r;
memset(&bp, 0, sizeof(bp));
@@ -877,7 +888,14 @@ static int amdgpu_vm_pt_create(struct amdgpu_device *adev,
bp.domain = amdgpu_bo_get_preferred_pin_domain(adev, bp.domain);
bp.flags = AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS |
AMDGPU_GEM_CREATE_CPU_GTT_USWC;
bp.bo_ptr_size = sizeof(struct amdgpu_bo);
if (level < AMDGPU_VM_PTB)
num_entries = amdgpu_vm_num_entries(adev, level);
else
num_entries = 0;
bp.bo_ptr_size = struct_size((*vmbo), entries, num_entries);
if (vm->use_cpu_for_update)
bp.flags |= AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
@@ -886,26 +904,41 @@ static int amdgpu_vm_pt_create(struct amdgpu_device *adev,
if (vm->root.base.bo)
bp.resv = vm->root.base.bo->tbo.base.resv;
r = amdgpu_bo_create(adev, &bp, bo);
r = amdgpu_bo_create_vm(adev, &bp, vmbo);
if (r)
return r;
if (vm->is_compute_context && (adev->flags & AMD_IS_APU))
bo = &(*vmbo)->bo;
if (vm->is_compute_context && (adev->flags & AMD_IS_APU)) {
(*vmbo)->shadow = NULL;
return 0;
}
if (!bp.resv)
WARN_ON(dma_resv_lock((*bo)->tbo.base.resv,
WARN_ON(dma_resv_lock(bo->tbo.base.resv,
NULL));
r = amdgpu_bo_create_shadow(adev, bp.size, *bo);
resv = bp.resv;
memset(&bp, 0, sizeof(bp));
bp.size = amdgpu_vm_bo_size(adev, level);
bp.domain = AMDGPU_GEM_DOMAIN_GTT;
bp.flags = AMDGPU_GEM_CREATE_CPU_GTT_USWC;
bp.type = ttm_bo_type_kernel;
bp.resv = bo->tbo.base.resv;
bp.bo_ptr_size = sizeof(struct amdgpu_bo);
if (!bp.resv)
dma_resv_unlock((*bo)->tbo.base.resv);
r = amdgpu_bo_create(adev, &bp, &(*vmbo)->shadow);
if (!resv)
dma_resv_unlock(bo->tbo.base.resv);
if (r) {
amdgpu_bo_unref(bo);
amdgpu_bo_unref(&bo);
return r;
}
(*vmbo)->shadow->parent = amdgpu_bo_ref(bo);
amdgpu_bo_add_to_shadow_list((*vmbo)->shadow);
return 0;
}
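Note how bp.bo_ptr_size is computed with struct_size() above: the per-PD entries array now lives in the same allocation as the BO itself. A standalone sketch of that flexible-array pattern (names invented; the kernel's struct_size() additionally checks for overflow):
#include <stdio.h>
#include <stdlib.h>
struct demo_bo_vm {
	int base;		/* stands in for the embedded struct amdgpu_bo */
	void *entries[];	/* flexible array member, sized at alloc time */
};
int main(void)
{
	size_t num_entries = 512;
	size_t sz = sizeof(struct demo_bo_vm) + num_entries * sizeof(void *);
	struct demo_bo_vm *p = malloc(sz);
	if (!p)
		return 1;
	printf("one allocation: %zu bytes for %zu entries\n", sz, num_entries);
	free(p);
	return 0;
}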
@@ -929,22 +962,18 @@ static int amdgpu_vm_alloc_pts(struct amdgpu_device *adev,
bool immediate)
{
struct amdgpu_vm_pt *entry = cursor->entry;
struct amdgpu_bo *pt;
struct amdgpu_bo *pt_bo;
struct amdgpu_bo_vm *pt;
int r;
if (cursor->level < AMDGPU_VM_PTB && !entry->entries) {
unsigned num_entries;
num_entries = amdgpu_vm_num_entries(adev, cursor->level);
entry->entries = kvmalloc_array(num_entries,
sizeof(*entry->entries),
GFP_KERNEL | __GFP_ZERO);
if (!entry->entries)
return -ENOMEM;
}
if (entry->base.bo)
if (entry->base.bo) {
if (cursor->level < AMDGPU_VM_PTB)
entry->entries =
to_amdgpu_bo_vm(entry->base.bo)->entries;
else
entry->entries = NULL;
return 0;
}
r = amdgpu_vm_pt_create(adev, vm, cursor->level, immediate, &pt);
if (r)
@@ -953,8 +982,13 @@ static int amdgpu_vm_alloc_pts(struct amdgpu_device *adev,
/* Keep a reference to the root directory to avoid
* freeing them up in the wrong order.
*/
pt->parent = amdgpu_bo_ref(cursor->parent->base.bo);
amdgpu_vm_bo_base_init(&entry->base, vm, pt);
pt_bo = &pt->bo;
pt_bo->parent = amdgpu_bo_ref(cursor->parent->base.bo);
amdgpu_vm_bo_base_init(&entry->base, vm, pt_bo);
if (cursor->level < AMDGPU_VM_PTB)
entry->entries = pt->entries;
else
entry->entries = NULL;
r = amdgpu_vm_clear_bo(adev, vm, pt, immediate);
if (r)
@@ -964,7 +998,7 @@ static int amdgpu_vm_alloc_pts(struct amdgpu_device *adev,
error_free_pt:
amdgpu_bo_unref(&pt->shadow);
amdgpu_bo_unref(&pt);
amdgpu_bo_unref(&pt_bo);
return r;
}
@@ -975,13 +1009,15 @@ error_free_pt:
*/
static void amdgpu_vm_free_table(struct amdgpu_vm_pt *entry)
{
struct amdgpu_bo *shadow;
if (entry->base.bo) {
shadow = amdgpu_bo_shadowed(entry->base.bo);
entry->base.bo->vm_bo = NULL;
list_del(&entry->base.vm_status);
amdgpu_bo_unref(&entry->base.bo->shadow);
amdgpu_bo_unref(&shadow);
amdgpu_bo_unref(&entry->base.bo);
}
kvfree(entry->entries);
entry->entries = NULL;
}
@@ -1280,7 +1316,8 @@ static int amdgpu_vm_update_pde(struct amdgpu_vm_update_params *params,
level += params->adev->vm_manager.root_level;
amdgpu_gmc_get_pde_for_bo(entry->base.bo, level, &pt, &flags);
pde = (entry - parent->entries) * 8;
return vm->update_funcs->update(params, bo, pde, pt, 1, 0, flags);
return vm->update_funcs->update(params, to_amdgpu_bo_vm(bo), pde, pt,
1, 0, flags);
}
/**
@@ -1360,9 +1397,9 @@ error:
* Make sure to set the right flags for the PTEs at the desired level.
*/
static void amdgpu_vm_update_flags(struct amdgpu_vm_update_params *params,
struct amdgpu_bo *bo, unsigned level,
struct amdgpu_bo_vm *pt, unsigned int level,
uint64_t pe, uint64_t addr,
unsigned count, uint32_t incr,
unsigned int count, uint32_t incr,
uint64_t flags)
{
@@ -1378,7 +1415,7 @@ static void amdgpu_vm_update_flags(struct amdgpu_vm_update_params *params,
flags |= AMDGPU_PTE_EXECUTABLE;
}
params->vm->update_funcs->update(params, bo, pe, addr, count, incr,
params->vm->update_funcs->update(params, pt, pe, addr, count, incr,
flags);
}
@@ -1558,9 +1595,9 @@ static int amdgpu_vm_update_ptes(struct amdgpu_vm_update_params *params,
nptes, dst, incr, upd_flags,
vm->task_info.pid,
vm->immediate.fence_context);
amdgpu_vm_update_flags(params, pt, cursor.level,
pe_start, dst, nptes, incr,
upd_flags);
amdgpu_vm_update_flags(params, to_amdgpu_bo_vm(pt),
cursor.level, pe_start, dst,
nptes, incr, upd_flags);
pe_start += nptes * 8;
dst += nptes * incr;
@@ -1583,9 +1620,12 @@ static int amdgpu_vm_update_ptes(struct amdgpu_vm_update_params *params,
* completely covered by the range and so potentially still in use.
*/
while (cursor.pfn < frag_start) {
amdgpu_vm_free_pts(adev, params->vm, &cursor);
/* Make sure previous mapping is freed */
if (cursor.entry->base.bo) {
params->table_freed = true;
amdgpu_vm_free_pts(adev, params->vm, &cursor);
}
amdgpu_vm_pt_next(adev, &cursor);
params->table_freed = true;
}
} else if (frag >= shift) {
@@ -1822,7 +1862,8 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev, struct amdgpu_bo_va *bo_va,
bo = gem_to_amdgpu_bo(gobj);
}
mem = &bo->tbo.mem;
if (mem->mem_type == TTM_PL_TT)
if (mem->mem_type == TTM_PL_TT ||
mem->mem_type == AMDGPU_PL_PREEMPT)
pages_addr = bo->tbo.ttm->dma_address;
}
@@ -2673,7 +2714,7 @@ void amdgpu_vm_bo_invalidate(struct amdgpu_device *adev,
struct amdgpu_vm_bo_base *bo_base;
/* shadow bo doesn't have bo base, its validation needs its parent */
if (bo->parent && bo->parent->shadow == bo)
if (bo->parent && (amdgpu_bo_shadowed(bo->parent) == bo))
bo = bo->parent;
for (bo_base = bo->vm_bo; bo_base; bo_base = bo_base->next) {
@@ -2842,7 +2883,8 @@ long amdgpu_vm_wait_idle(struct amdgpu_vm *vm, long timeout)
*/
int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm, u32 pasid)
{
struct amdgpu_bo *root;
struct amdgpu_bo *root_bo;
struct amdgpu_bo_vm *root;
int r, i;
vm->va = RB_ROOT_CACHED;
@@ -2896,16 +2938,16 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm, u32 pasid)
false, &root);
if (r)
goto error_free_delayed;
r = amdgpu_bo_reserve(root, true);
root_bo = &root->bo;
r = amdgpu_bo_reserve(root_bo, true);
if (r)
goto error_free_root;
r = dma_resv_reserve_shared(root->tbo.base.resv, 1);
r = dma_resv_reserve_shared(root_bo->tbo.base.resv, 1);
if (r)
goto error_unreserve;
amdgpu_vm_bo_base_init(&vm->root.base, vm, root);
amdgpu_vm_bo_base_init(&vm->root.base, vm, root_bo);
r = amdgpu_vm_clear_bo(adev, vm, root, false);
if (r)
@@ -2934,8 +2976,8 @@ error_unreserve:
amdgpu_bo_unreserve(vm->root.base.bo);
error_free_root:
amdgpu_bo_unref(&vm->root.base.bo->shadow);
amdgpu_bo_unref(&vm->root.base.bo);
amdgpu_bo_unref(&root->shadow);
amdgpu_bo_unref(&root_bo);
vm->root.base.bo = NULL;
error_free_delayed:
@@ -3033,7 +3075,9 @@ int amdgpu_vm_make_compute(struct amdgpu_device *adev, struct amdgpu_vm *vm,
*/
if (pte_support_ats != vm->pte_support_ats) {
vm->pte_support_ats = pte_support_ats;
r = amdgpu_vm_clear_bo(adev, vm, vm->root.base.bo, false);
r = amdgpu_vm_clear_bo(adev, vm,
to_amdgpu_bo_vm(vm->root.base.bo),
false);
if (r)
goto free_idr;
}
@@ -3077,7 +3121,7 @@ int amdgpu_vm_make_compute(struct amdgpu_device *adev, struct amdgpu_vm *vm,
}
/* Free the shadow bo for compute VM */
amdgpu_bo_unref(&vm->root.base.bo->shadow);
amdgpu_bo_unref(&to_amdgpu_bo_vm(vm->root.base.bo)->shadow);
if (pasid)
vm->pasid = pasid;

View File

@@ -39,6 +39,7 @@
struct amdgpu_bo_va;
struct amdgpu_job;
struct amdgpu_bo_list_entry;
struct amdgpu_bo_vm;
/*
* GPUVM handling
@@ -239,11 +240,11 @@ struct amdgpu_vm_update_params {
};
struct amdgpu_vm_update_funcs {
int (*map_table)(struct amdgpu_bo *bo);
int (*map_table)(struct amdgpu_bo_vm *bo);
int (*prepare)(struct amdgpu_vm_update_params *p, struct dma_resv *resv,
enum amdgpu_sync_mode sync_mode);
int (*update)(struct amdgpu_vm_update_params *p,
struct amdgpu_bo *bo, uint64_t pe, uint64_t addr,
struct amdgpu_bo_vm *bo, uint64_t pe, uint64_t addr,
unsigned count, uint32_t incr, uint64_t flags);
int (*commit)(struct amdgpu_vm_update_params *p,
struct dma_fence **fence);

View File

@@ -29,9 +29,9 @@
*
* @table: newly allocated or validated PD/PT
*/
static int amdgpu_vm_cpu_map_table(struct amdgpu_bo *table)
static int amdgpu_vm_cpu_map_table(struct amdgpu_bo_vm *table)
{
return amdgpu_bo_kmap(table, NULL);
return amdgpu_bo_kmap(&table->bo, NULL);
}
/**
@@ -58,7 +58,7 @@ static int amdgpu_vm_cpu_prepare(struct amdgpu_vm_update_params *p,
* amdgpu_vm_cpu_update - helper to update page tables via CPU
*
* @p: see amdgpu_vm_update_params definition
* @bo: PD/PT to update
* @vmbo: PD/PT to update
* @pe: byte offset of the PDE/PTE, relative to start of PDB/PTB
* @addr: dst addr to write into pe
* @count: number of page entries to update
@@ -68,7 +68,7 @@ static int amdgpu_vm_cpu_prepare(struct amdgpu_vm_update_params *p,
* Write count number of PT/PD entries directly.
*/
static int amdgpu_vm_cpu_update(struct amdgpu_vm_update_params *p,
struct amdgpu_bo *bo, uint64_t pe,
struct amdgpu_bo_vm *vmbo, uint64_t pe,
uint64_t addr, unsigned count, uint32_t incr,
uint64_t flags)
{
@@ -76,13 +76,13 @@ static int amdgpu_vm_cpu_update(struct amdgpu_vm_update_params *p,
uint64_t value;
int r;
if (bo->tbo.moving) {
r = dma_fence_wait(bo->tbo.moving, true);
if (vmbo->bo.tbo.moving) {
r = dma_fence_wait(vmbo->bo.tbo.moving, true);
if (r)
return r;
}
pe += (unsigned long)amdgpu_bo_kptr(bo);
pe += (unsigned long)amdgpu_bo_kptr(&vmbo->bo);
trace_amdgpu_vm_set_ptes(pe, addr, count, incr, flags, p->immediate);

View File

@@ -33,11 +33,11 @@
*
* @table: newly allocated or validated PD/PT
*/
static int amdgpu_vm_sdma_map_table(struct amdgpu_bo *table)
static int amdgpu_vm_sdma_map_table(struct amdgpu_bo_vm *table)
{
int r;
r = amdgpu_ttm_alloc_gart(&table->tbo);
r = amdgpu_ttm_alloc_gart(&table->bo.tbo);
if (r)
return r;
@@ -186,7 +186,7 @@ static void amdgpu_vm_sdma_set_ptes(struct amdgpu_vm_update_params *p,
* amdgpu_vm_sdma_update - execute VM update
*
* @p: see amdgpu_vm_update_params definition
* @bo: PD/PT to update
* @vmbo: PD/PT to update
* @pe: byte offset of the PDE/PTE, relative to start of PDB/PTB
* @addr: dst addr to write into pe
* @count: number of page entries to update
@@ -197,10 +197,11 @@ static void amdgpu_vm_sdma_set_ptes(struct amdgpu_vm_update_params *p,
* the IB.
*/
static int amdgpu_vm_sdma_update(struct amdgpu_vm_update_params *p,
struct amdgpu_bo *bo, uint64_t pe,
struct amdgpu_bo_vm *vmbo, uint64_t pe,
uint64_t addr, unsigned count, uint32_t incr,
uint64_t flags)
{
struct amdgpu_bo *bo = &vmbo->bo;
enum amdgpu_ib_pool_type pool = p->immediate ? AMDGPU_IB_POOL_IMMEDIATE
: AMDGPU_IB_POOL_DELAYED;
unsigned int i, ndw, nptes;
@@ -238,8 +239,8 @@ static int amdgpu_vm_sdma_update(struct amdgpu_vm_update_params *p,
if (!p->pages_addr) {
/* set page commands needed */
if (bo->shadow)
amdgpu_vm_sdma_set_ptes(p, bo->shadow, pe, addr,
if (vmbo->shadow)
amdgpu_vm_sdma_set_ptes(p, vmbo->shadow, pe, addr,
count, incr, flags);
amdgpu_vm_sdma_set_ptes(p, bo, pe, addr, count,
incr, flags);
@@ -248,7 +249,7 @@ static int amdgpu_vm_sdma_update(struct amdgpu_vm_update_params *p,
/* copy commands needed */
ndw -= p->adev->vm_manager.vm_pte_funcs->copy_pte_num_dw *
(bo->shadow ? 2 : 1);
(vmbo->shadow ? 2 : 1);
/* for padding */
ndw -= 7;
@@ -263,8 +264,8 @@ static int amdgpu_vm_sdma_update(struct amdgpu_vm_update_params *p,
pte[i] |= flags;
}
if (bo->shadow)
amdgpu_vm_sdma_copy_ptes(p, bo->shadow, pe, nptes);
if (vmbo->shadow)
amdgpu_vm_sdma_copy_ptes(p, vmbo->shadow, pe, nptes);
amdgpu_vm_sdma_copy_ptes(p, bo, pe, nptes);
pe += nptes * 8;

View File

@@ -31,6 +31,7 @@
#define ATOM_DEBUG
#include "atomfirmware.h"
#include "atom.h"
#include "atom-names.h"
#include "atom-bits.h"
@@ -1299,12 +1300,168 @@ static void atom_index_iio(struct atom_context *ctx, int base)
}
}
static void atom_get_vbios_name(struct atom_context *ctx)
{
unsigned char *p_rom;
unsigned char str_num;
unsigned short off_to_vbios_str;
unsigned char *c_ptr;
int name_size;
int i;
const char *na = "--N/A--";
char *back;
p_rom = ctx->bios;
str_num = *(p_rom + OFFSET_TO_GET_ATOMBIOS_NUMBER_OF_STRINGS);
if (str_num != 0) {
off_to_vbios_str =
*(unsigned short *)(p_rom + OFFSET_TO_GET_ATOMBIOS_STRING_START);
c_ptr = (unsigned char *)(p_rom + off_to_vbios_str);
} else {
/* do not know where to find name */
memcpy(ctx->name, na, 7);
ctx->name[7] = 0;
return;
}
/*
* skip the atombios strings, usually 4
* 1st is P/N, 2nd is ASIC, 3rd is PCI type, 4th is Memory type
*/
for (i = 0; i < str_num; i++) {
while (*c_ptr != 0)
c_ptr++;
c_ptr++;
}
/* skip the following 2 chars: 0x0D 0x0A */
c_ptr += 2;
name_size = strnlen(c_ptr, STRLEN_LONG - 1);
memcpy(ctx->name, c_ptr, name_size);
back = ctx->name + name_size;
while ((*--back) == ' ')
;
*(back + 1) = '\0';
}
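The loop above steps over str_num consecutive NUL-terminated strings before the name is reached. The same skip in standalone form (the sample strings are invented):
#include <stdio.h>
static const char *skip_strings(const char *p, int n)
{
	int i;
	for (i = 0; i < n; i++) {
		while (*p)
			p++;
		p++;	/* step over the terminating NUL */
	}
	return p;
}
int main(void)
{
	/* Two packed strings followed by the payload. */
	const char buf[] = "113-XXXX\0ASIC\0payload";
	printf("%s\n", skip_strings(buf, 2));	/* prints: payload */
	return 0;
}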
static void atom_get_vbios_date(struct atom_context *ctx)
{
unsigned char *p_rom;
unsigned char *date_in_rom;
p_rom = ctx->bios;
date_in_rom = p_rom + OFFSET_TO_VBIOS_DATE;
ctx->date[0] = '2';
ctx->date[1] = '0';
ctx->date[2] = date_in_rom[6];
ctx->date[3] = date_in_rom[7];
ctx->date[4] = '/';
ctx->date[5] = date_in_rom[0];
ctx->date[6] = date_in_rom[1];
ctx->date[7] = '/';
ctx->date[8] = date_in_rom[3];
ctx->date[9] = date_in_rom[4];
ctx->date[10] = ' ';
ctx->date[11] = date_in_rom[9];
ctx->date[12] = date_in_rom[10];
ctx->date[13] = date_in_rom[11];
ctx->date[14] = date_in_rom[12];
ctx->date[15] = date_in_rom[13];
ctx->date[16] = '\0';
}
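The index shuffling implies the ROM stores the date roughly as "MM/DD/YY HH:MM" and the function rewrites it as "20YY/MM/DD HH:MM". The same transformation standalone (the sample ROM bytes are invented; the layout is inferred from the index mapping above):
#include <stdio.h>
#include <string.h>
int main(void)
{
	const char rom_date[] = "06/02/21 11:30";
	char date[17];
	date[0] = '2';          date[1] = '0';
	date[2] = rom_date[6];  date[3] = rom_date[7];	/* year */
	date[4] = '/';
	date[5] = rom_date[0];  date[6] = rom_date[1];	/* month */
	date[7] = '/';
	date[8] = rom_date[3];  date[9] = rom_date[4];	/* day */
	date[10] = ' ';
	memcpy(&date[11], &rom_date[9], 5);		/* HH:MM */
	date[16] = '\0';
	printf("%s\n", date);	/* prints: 2021/06/02 11:30 */
	return 0;
}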
static unsigned char *atom_find_str_in_rom(struct atom_context *ctx, char *str, int start,
int end, int maxlen)
{
unsigned long str_off;
unsigned char *p_rom;
unsigned short str_len;
str_off = 0;
str_len = strnlen(str, maxlen);
p_rom = ctx->bios;
for (; start <= end; ++start) {
for (str_off = 0; str_off < str_len; ++str_off) {
if (str[str_off] != *(p_rom + start + str_off))
break;
}
if (str_off == str_len || str[str_off] == 0)
return p_rom + start;
}
return NULL;
}
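atom_find_str_in_rom is a bounded brute-force scan: it tries every offset in [start, end] and compares at most maxlen characters per position. A standalone equivalent over an invented buffer:
#include <stdio.h>
#include <string.h>
static const unsigned char *find_str(const unsigned char *rom, const char *str,
				     int start, int end, int maxlen)
{
	int len = (int)strnlen(str, maxlen);
	int i, j;
	for (i = start; i <= end; i++) {
		for (j = 0; j < len; j++)
			if ((unsigned char)str[j] != rom[i + j])
				break;
		if (j == len)
			return rom + i;
	}
	return NULL;
}
int main(void)
{
	const unsigned char rom[] = "xxxxATOM 113-D0000000-100xxx";
	const unsigned char *hit = find_str(rom, "ATOM", 3, 20, 64);
	if (hit)
		printf("found at offset %td\n", hit - rom);
	else
		printf("not found\n");
	return 0;
}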
static void atom_get_vbios_pn(struct atom_context *ctx)
{
unsigned char *p_rom;
unsigned short off_to_vbios_str;
unsigned char *vbios_str;
int count;
off_to_vbios_str = 0;
p_rom = ctx->bios;
if (*(p_rom + OFFSET_TO_GET_ATOMBIOS_NUMBER_OF_STRINGS) != 0) {
off_to_vbios_str =
*(unsigned short *)(p_rom + OFFSET_TO_GET_ATOMBIOS_STRING_START);
vbios_str = (unsigned char *)(p_rom + off_to_vbios_str);
} else {
vbios_str = p_rom + OFFSET_TO_VBIOS_PART_NUMBER;
}
if (*vbios_str == 0) {
vbios_str = atom_find_str_in_rom(ctx, BIOS_ATOM_PREFIX, 3, 1024, 64);
if (vbios_str != NULL)
vbios_str += sizeof(BIOS_ATOM_PREFIX) - 1;
}
if (vbios_str != NULL && *vbios_str == 0)
vbios_str++;
if (vbios_str != NULL) {
count = 0;
while ((count < BIOS_STRING_LENGTH) && vbios_str[count] >= ' ' &&
vbios_str[count] <= 'z') {
ctx->vbios_pn[count] = vbios_str[count];
count++;
}
ctx->vbios_pn[count] = 0;
}
}
static void atom_get_vbios_version(struct atom_context *ctx)
{
unsigned char *vbios_ver;
/* find anchor ATOMBIOSBK-AMD */
vbios_ver = atom_find_str_in_rom(ctx, BIOS_VERSION_PREFIX, 3, 1024, 64);
if (vbios_ver != NULL) {
/* skip ATOMBIOSBK-AMD VER */
vbios_ver += 18;
memcpy(ctx->vbios_ver_str, vbios_ver, STRLEN_NORMAL);
} else {
ctx->vbios_ver_str[0] = '\0';
}
}
struct atom_context *amdgpu_atom_parse(struct card_info *card, void *bios)
{
int base;
struct atom_context *ctx =
kzalloc(sizeof(struct atom_context), GFP_KERNEL);
char *str;
struct _ATOM_ROM_HEADER *atom_rom_header;
struct _ATOM_MASTER_DATA_TABLE *master_table;
struct _ATOM_FIRMWARE_INFO *atom_fw_info;
u16 idx;
if (!ctx)
@@ -1353,6 +1510,21 @@ struct atom_context *amdgpu_atom_parse(struct card_info *card, void *bios)
strlcpy(ctx->vbios_version, str, sizeof(ctx->vbios_version));
}
atom_rom_header = (struct _ATOM_ROM_HEADER *)CSTR(base);
if (atom_rom_header->usMasterDataTableOffset != 0) {
master_table = (struct _ATOM_MASTER_DATA_TABLE *)
CSTR(atom_rom_header->usMasterDataTableOffset);
if (master_table->ListOfDataTables.FirmwareInfo != 0) {
atom_fw_info = (struct _ATOM_FIRMWARE_INFO *)
CSTR(master_table->ListOfDataTables.FirmwareInfo);
ctx->version = atom_fw_info->ulFirmwareRevision;
}
}
atom_get_vbios_name(ctx);
atom_get_vbios_pn(ctx);
atom_get_vbios_date(ctx);
atom_get_vbios_version(ctx);
return ctx;
}

View File

@@ -112,6 +112,10 @@ struct drm_device;
#define ATOM_IO_SYSIO 2
#define ATOM_IO_IIO 0x80
#define STRLEN_NORMAL 32
#define STRLEN_LONG 64
#define STRLEN_VERYLONG 254
struct card_info {
struct drm_device *dev;
void (* reg_write)(struct card_info *, uint32_t, uint32_t); /* filled by driver */
@@ -140,6 +144,12 @@ struct atom_context {
uint32_t *scratch;
int scratch_size_bytes;
char vbios_version[20];
uint8_t name[STRLEN_LONG];
uint8_t vbios_pn[STRLEN_LONG];
uint32_t version;
uint8_t vbios_ver_str[STRLEN_NORMAL];
uint8_t date[STRLEN_NORMAL];
};
extern int amdgpu_atom_debug;

View File

@@ -720,7 +720,7 @@ err0:
}
/**
* cik_sdma_vm_copy_pages - update PTEs by copying them from the GART
* cik_sdma_vm_copy_pte - update PTEs by copying them from the GART
*
* @ib: indirect buffer to fill with commands
* @pe: addr of the page entry
@@ -746,7 +746,7 @@ static void cik_sdma_vm_copy_pte(struct amdgpu_ib *ib,
}
/**
* cik_sdma_vm_write_pages - update PTEs by writing them manually
* cik_sdma_vm_write_pte - update PTEs by writing them manually
*
* @ib: indirect buffer to fill with commands
* @pe: addr of the page entry
@@ -775,7 +775,7 @@ static void cik_sdma_vm_write_pte(struct amdgpu_ib *ib, uint64_t pe,
}
/**
* cik_sdma_vm_set_pages - update the page tables using sDMA
* cik_sdma_vm_set_pte_pde - update the page tables using sDMA
*
* @ib: indirect buffer to fill with commands
* @pe: addr of the page entry
@@ -804,7 +804,7 @@ static void cik_sdma_vm_set_pte_pde(struct amdgpu_ib *ib, uint64_t pe,
}
/**
* cik_sdma_vm_pad_ib - pad the IB to the required number of dw
* cik_sdma_ring_pad_ib - pad the IB to the required number of dw
*
* @ring: amdgpu_ring structure holding ring information
* @ib: indirect buffer to fill with padding

View File

@@ -456,7 +456,7 @@ static void dce_v6_0_program_fmt(struct drm_encoder *encoder)
}
/**
* cik_get_number_of_dram_channels - get the number of dram channels
* si_get_number_of_dram_channels - get the number of dram channels
*
* @adev: amdgpu_device pointer
*

View File

@@ -47,7 +47,7 @@
#include "gfx_v10_0.h"
#include "nbio_v2_3.h"
/**
/*
* Navi10 has two graphic rings to share each graphic pipe.
* 1. Primary ring
* 2. Async ring
@@ -1432,38 +1432,36 @@ static const struct soc15_reg_golden golden_settings_gc_10_1_2[] =
SOC15_REG_GOLDEN_VALUE(GC, 0, mmUTCL1_CTRL, 0xffffffff, 0x00c00000)
};
static bool gfx_v10_is_rlcg_rw(struct amdgpu_device *adev, u32 offset, uint32_t *flag, bool write)
static bool gfx_v10_get_rlcg_flag(struct amdgpu_device *adev, u32 acc_flags, u32 hwip,
int write, u32 *rlcg_flag)
{
/* always programmed by rlcg, only for gc */
if (offset == SOC15_REG_OFFSET(GC, 0, mmRLC_CSIB_ADDR_HI) ||
offset == SOC15_REG_OFFSET(GC, 0, mmRLC_CSIB_ADDR_LO) ||
offset == SOC15_REG_OFFSET(GC, 0, mmRLC_CSIB_LENGTH) ||
offset == SOC15_REG_OFFSET(GC, 0, mmGRBM_GFX_CNTL) ||
offset == SOC15_REG_OFFSET(GC, 0, mmGRBM_GFX_INDEX) ||
offset == SOC15_REG_OFFSET(GC, 0, mmCP_ME_CNTL)) {
if (!amdgpu_sriov_reg_indirect_gc(adev))
*flag = GFX_RLCG_GC_WRITE_OLD;
else
*flag = write ? GFX_RLCG_GC_WRITE : GFX_RLCG_GC_READ;
switch (hwip) {
case GC_HWIP:
if (amdgpu_sriov_reg_indirect_gc(adev)) {
*rlcg_flag = write ? GFX_RLCG_GC_WRITE : GFX_RLCG_GC_READ;
return true;
return true;
/* only in the new path can AMDGPU_REGS_NO_KIQ and AMDGPU_REGS_RLC be enabled simultaneously */
} else if ((acc_flags & AMDGPU_REGS_RLC) && !(acc_flags & AMDGPU_REGS_NO_KIQ)) {
*rlcg_flag = GFX_RLCG_GC_WRITE_OLD;
return true;
}
break;
case MMHUB_HWIP:
if (amdgpu_sriov_reg_indirect_mmhub(adev) &&
(acc_flags & AMDGPU_REGS_RLC) && write) {
*rlcg_flag = GFX_RLCG_MMHUB_WRITE;
return true;
}
break;
default:
DRM_DEBUG("Not program register by RLCG\n");
}
/* currently support gc read/write, mmhub write */
if (offset >= SOC15_REG_OFFSET(GC, 0, mmSDMA0_DEC_START) &&
offset <= SOC15_REG_OFFSET(GC, 0, mmRLC_GTS_OFFSET_MSB)) {
if (amdgpu_sriov_reg_indirect_gc(adev))
*flag = write ? GFX_RLCG_GC_WRITE : GFX_RLCG_GC_READ;
else
return false;
} else {
if (amdgpu_sriov_reg_indirect_mmhub(adev))
*flag = GFX_RLCG_MMHUB_WRITE;
else
return false;
}
return true;
return false;
}
static u32 gfx_v10_rlcg_rw(struct amdgpu_device *adev, u32 offset, u32 v, uint32_t flag)
@@ -1523,36 +1521,34 @@ static u32 gfx_v10_rlcg_rw(struct amdgpu_device *adev, u32 offset, u32 v, uint32
return ret;
}
static void gfx_v10_rlcg_wreg(struct amdgpu_device *adev, u32 offset, u32 value, u32 flag)
static void gfx_v10_rlcg_wreg(struct amdgpu_device *adev, u32 offset, u32 value, u32 acc_flags, u32 hwip)
{
uint32_t rlcg_flag;
u32 rlcg_flag;
if (amdgpu_sriov_fullaccess(adev) &&
gfx_v10_is_rlcg_rw(adev, offset, &rlcg_flag, 1)) {
if (!amdgpu_sriov_runtime(adev) &&
gfx_v10_get_rlcg_flag(adev, acc_flags, hwip, 1, &rlcg_flag)) {
gfx_v10_rlcg_rw(adev, offset, value, rlcg_flag);
return;
}
if (flag & AMDGPU_REGS_NO_KIQ)
if (acc_flags & AMDGPU_REGS_NO_KIQ)
WREG32_NO_KIQ(offset, value);
else
WREG32(offset, value);
}
static u32 gfx_v10_rlcg_rreg(struct amdgpu_device *adev, u32 offset, u32 flag)
static u32 gfx_v10_rlcg_rreg(struct amdgpu_device *adev, u32 offset, u32 acc_flags, u32 hwip)
{
uint32_t rlcg_flag;
u32 rlcg_flag;
if (amdgpu_sriov_fullaccess(adev) &&
gfx_v10_is_rlcg_rw(adev, offset, &rlcg_flag, 0))
if (!amdgpu_sriov_runtime(adev) &&
gfx_v10_get_rlcg_flag(adev, acc_flags, hwip, 0, &rlcg_flag))
return gfx_v10_rlcg_rw(adev, offset, 0, rlcg_flag);
if (flag & AMDGPU_REGS_NO_KIQ)
if (acc_flags & AMDGPU_REGS_NO_KIQ)
return RREG32_NO_KIQ(offset);
else
return RREG32(offset);
return 0;
}
static const struct soc15_reg_golden golden_settings_gc_10_1_nv14[] =
@@ -3935,7 +3931,7 @@ static int gfx_v10_0_init_microcode(struct amdgpu_device *adev)
{
const char *chip_name;
char fw_name[40];
char wks[10];
char *wks = "";
int err;
struct amdgpu_firmware_info *info = NULL;
const struct common_firmware_header *header = NULL;
@@ -3948,7 +3944,6 @@ static int gfx_v10_0_init_microcode(struct amdgpu_device *adev)
DRM_DEBUG("\n");
memset(wks, 0, sizeof(wks));
switch (adev->asic_type) {
case CHIP_NAVI10:
chip_name = "navi10";
@@ -3957,7 +3952,7 @@ static int gfx_v10_0_init_microcode(struct amdgpu_device *adev)
chip_name = "navi14";
if (!(adev->pdev->device == 0x7340 &&
adev->pdev->revision != 0x00))
snprintf(wks, sizeof(wks), "_wks");
wks = "_wks";
break;
case CHIP_NAVI12:
chip_name = "navi12";
@@ -5233,10 +5228,10 @@ static void gfx_v10_0_rlc_enable_srm(struct amdgpu_device *adev)
uint32_t tmp;
/* enable Save Restore Machine */
tmp = RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_SRM_CNTL));
tmp = RREG32_SOC15(GC, 0, mmRLC_SRM_CNTL);
tmp |= RLC_SRM_CNTL__AUTO_INCR_ADDR_MASK;
tmp |= RLC_SRM_CNTL__SRM_ENABLE_MASK;
WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_SRM_CNTL), tmp);
WREG32_SOC15(GC, 0, mmRLC_SRM_CNTL, tmp);
}
static int gfx_v10_0_rlc_load_microcode(struct amdgpu_device *adev)
@@ -7941,12 +7936,12 @@ static int gfx_v10_0_update_gfx_clock_gating(struct amdgpu_device *adev,
static void gfx_v10_0_update_spm_vmid(struct amdgpu_device *adev, unsigned vmid)
{
u32 reg, data;
/* not for *_SOC15 */
reg = SOC15_REG_OFFSET(GC, 0, mmRLC_SPM_MC_CNTL);
if (amdgpu_sriov_is_pp_one_vf(adev))
data = RREG32_NO_KIQ(reg);
else
data = RREG32(reg);
data = RREG32_SOC15(GC, 0, mmRLC_SPM_MC_CNTL);
data &= ~RLC_SPM_MC_CNTL__RLC_SPM_VMID_MASK;
data |= (vmid & RLC_SPM_MC_CNTL__RLC_SPM_VMID_MASK) << RLC_SPM_MC_CNTL__RLC_SPM_VMID__SHIFT;
@@ -8688,16 +8683,16 @@ gfx_v10_0_set_gfx_eop_interrupt_state(struct amdgpu_device *adev,
switch (state) {
case AMDGPU_IRQ_STATE_DISABLE:
cp_int_cntl = RREG32(cp_int_cntl_reg);
cp_int_cntl = RREG32_SOC15_IP(GC, cp_int_cntl_reg);
cp_int_cntl = REG_SET_FIELD(cp_int_cntl, CP_INT_CNTL_RING0,
TIME_STAMP_INT_ENABLE, 0);
WREG32(cp_int_cntl_reg, cp_int_cntl);
WREG32_SOC15_IP(GC, cp_int_cntl_reg, cp_int_cntl);
break;
case AMDGPU_IRQ_STATE_ENABLE:
cp_int_cntl = RREG32(cp_int_cntl_reg);
cp_int_cntl = RREG32_SOC15_IP(GC, cp_int_cntl_reg);
cp_int_cntl = REG_SET_FIELD(cp_int_cntl, CP_INT_CNTL_RING0,
TIME_STAMP_INT_ENABLE, 1);
WREG32(cp_int_cntl_reg, cp_int_cntl);
WREG32_SOC15_IP(GC, cp_int_cntl_reg, cp_int_cntl);
break;
default:
break;
@@ -8741,16 +8736,16 @@ static void gfx_v10_0_set_compute_eop_interrupt_state(struct amdgpu_device *adev
switch (state) {
case AMDGPU_IRQ_STATE_DISABLE:
mec_int_cntl = RREG32(mec_int_cntl_reg);
mec_int_cntl = RREG32_SOC15_IP(GC, mec_int_cntl_reg);
mec_int_cntl = REG_SET_FIELD(mec_int_cntl, CP_ME1_PIPE0_INT_CNTL,
TIME_STAMP_INT_ENABLE, 0);
WREG32(mec_int_cntl_reg, mec_int_cntl);
WREG32_SOC15_IP(GC, mec_int_cntl_reg, mec_int_cntl);
break;
case AMDGPU_IRQ_STATE_ENABLE:
mec_int_cntl = RREG32(mec_int_cntl_reg);
mec_int_cntl = RREG32_SOC15_IP(GC, mec_int_cntl_reg);
mec_int_cntl = REG_SET_FIELD(mec_int_cntl, CP_ME1_PIPE0_INT_CNTL,
TIME_STAMP_INT_ENABLE, 1);
WREG32(mec_int_cntl_reg, mec_int_cntl);
WREG32_SOC15_IP(GC, mec_int_cntl_reg, mec_int_cntl);
break;
default:
break;
@@ -8946,20 +8941,20 @@ static int gfx_v10_0_kiq_set_interrupt_state(struct amdgpu_device *adev,
GENERIC2_INT_ENABLE, 0);
WREG32_SOC15(GC, 0, mmCPC_INT_CNTL, tmp);
tmp = RREG32(target);
tmp = RREG32_SOC15_IP(GC, target);
tmp = REG_SET_FIELD(tmp, CP_ME2_PIPE0_INT_CNTL,
GENERIC2_INT_ENABLE, 0);
WREG32(target, tmp);
WREG32_SOC15_IP(GC, target, tmp);
} else {
tmp = RREG32_SOC15(GC, 0, mmCPC_INT_CNTL);
tmp = REG_SET_FIELD(tmp, CPC_INT_CNTL,
GENERIC2_INT_ENABLE, 1);
WREG32_SOC15(GC, 0, mmCPC_INT_CNTL, tmp);
tmp = RREG32(target);
tmp = RREG32_SOC15_IP(GC, target);
tmp = REG_SET_FIELD(tmp, CP_ME2_PIPE0_INT_CNTL,
GENERIC2_INT_ENABLE, 1);
WREG32(target, tmp);
WREG32_SOC15_IP(GC, target, tmp);
}
break;
default:

View File

@@ -2116,7 +2116,7 @@ error_free_scratch:
}
/**
* gfx_v7_0_ring_emit_hdp - emit an hdp flush on the cp
* gfx_v7_0_ring_emit_hdp_flush - emit an hdp flush on the cp
*
* @ring: amdgpu_ring structure holding ring information
*
@@ -2242,7 +2242,7 @@ static void gfx_v7_0_ring_emit_fence_compute(struct amdgpu_ring *ring,
* IB stuff
*/
/**
* gfx_v7_0_ring_emit_ib - emit an IB (Indirect Buffer) on the ring
* gfx_v7_0_ring_emit_ib_gfx - emit an IB (Indirect Buffer) on the ring
*
* @ring: amdgpu_ring structure holding ring information
* @job: job to retrieve vmid from
@@ -3196,7 +3196,7 @@ static int gfx_v7_0_cp_resume(struct amdgpu_device *adev)
}
/**
* gfx_v7_0_ring_emit_vm_flush - cik vm flush using the CP
* gfx_v7_0_ring_emit_pipeline_sync - cik vm flush using the CP
*
* @ring: the ring to emit the commands to
*

View File

@@ -734,7 +734,7 @@ static const u32 GFX_RLC_SRM_INDEX_CNTL_DATA_OFFSETS[] =
mmRLC_SRM_INDEX_CNTL_DATA_7 - mmRLC_SRM_INDEX_CNTL_DATA_0,
};
static void gfx_v9_0_rlcg_rw(struct amdgpu_device *adev, u32 offset, u32 v, u32 flag)
static void gfx_v9_0_rlcg_w(struct amdgpu_device *adev, u32 offset, u32 v, u32 flag)
{
static void *scratch_reg0;
static void *scratch_reg1;
@@ -787,15 +787,16 @@ static void gfx_v9_0_rlcg_rw(struct amdgpu_device *adev, u32 offset, u32 v, u32
}
static void gfx_v9_0_rlcg_wreg(struct amdgpu_device *adev, u32 offset, u32 v, u32 flag)
static void gfx_v9_0_rlcg_wreg(struct amdgpu_device *adev, u32 offset,
u32 v, u32 acc_flags, u32 hwip)
{
if (amdgpu_sriov_fullaccess(adev)) {
gfx_v9_0_rlcg_rw(adev, offset, v, flag);
gfx_v9_0_rlcg_w(adev, offset, v, acc_flags);
return;
}
if (flag & AMDGPU_REGS_NO_KIQ)
if (acc_flags & AMDGPU_REGS_NO_KIQ)
WREG32_NO_KIQ(offset, v);
else
WREG32(offset, v);

View File

@@ -1641,8 +1641,8 @@ static int gfx_v9_4_2_query_utc_edc_count(struct amdgpu_device *adev,
return 0;
}
int gfx_v9_4_2_query_ras_error_count(struct amdgpu_device *adev,
void *ras_error_status)
static int gfx_v9_4_2_query_ras_error_count(struct amdgpu_device *adev,
void *ras_error_status)
{
struct ras_err_data *err_data = (struct ras_err_data *)ras_error_status;
uint32_t sec_count = 0, ded_count = 0;
@@ -1676,13 +1676,14 @@ static void gfx_v9_4_2_reset_ea_err_status(struct amdgpu_device *adev)
uint32_t i, j;
uint32_t value;
value = REG_SET_FIELD(0, GCEA_ERR_STATUS, CLEAR_ERROR_STATUS, 0x1);
mutex_lock(&adev->grbm_idx_mutex);
for (i = 0; i < gfx_v9_4_2_ea_err_status_regs.se_num; i++) {
for (j = 0; j < gfx_v9_4_2_ea_err_status_regs.instance;
j++) {
gfx_v9_4_2_select_se_sh(adev, i, 0, j);
value = RREG32(SOC15_REG_ENTRY_OFFSET(
gfx_v9_4_2_ea_err_status_regs));
value = REG_SET_FIELD(value, GCEA_ERR_STATUS, CLEAR_ERROR_STATUS, 0x1);
WREG32(SOC15_REG_ENTRY_OFFSET(gfx_v9_4_2_ea_err_status_regs), value);
}
}
@@ -1690,7 +1691,7 @@ static void gfx_v9_4_2_reset_ea_err_status(struct amdgpu_device *adev)
mutex_unlock(&adev->grbm_idx_mutex);
}
void gfx_v9_4_2_reset_ras_error_count(struct amdgpu_device *adev)
static void gfx_v9_4_2_reset_ras_error_count(struct amdgpu_device *adev)
{
if (!amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__GFX))
return;
@@ -1699,7 +1700,7 @@ void gfx_v9_4_2_reset_ras_error_count(struct amdgpu_device *adev)
gfx_v9_4_2_query_utc_edc_count(adev, NULL, NULL);
}
int gfx_v9_4_2_ras_error_inject(struct amdgpu_device *adev, void *inject_if)
static int gfx_v9_4_2_ras_error_inject(struct amdgpu_device *adev, void *inject_if)
{
struct ras_inject_if *info = (struct ras_inject_if *)inject_if;
int ret;
@@ -1734,6 +1735,7 @@ static void gfx_v9_4_2_query_ea_err_status(struct amdgpu_device *adev)
gfx_v9_4_2_select_se_sh(adev, i, 0, j);
reg_value = RREG32(SOC15_REG_ENTRY_OFFSET(
gfx_v9_4_2_ea_err_status_regs));
if (REG_GET_FIELD(reg_value, GCEA_ERR_STATUS, SDP_RDRSP_STATUS) ||
REG_GET_FIELD(reg_value, GCEA_ERR_STATUS, SDP_WRRSP_STATUS) ||
REG_GET_FIELD(reg_value, GCEA_ERR_STATUS, SDP_RDRSP_DATAPARITY_ERROR)) {
@@ -1741,7 +1743,9 @@ static void gfx_v9_4_2_query_ea_err_status(struct amdgpu_device *adev)
j, reg_value);
}
/* clear after read */
WREG32(SOC15_REG_ENTRY_OFFSET(gfx_v9_4_2_ea_err_status_regs), 0x10);
reg_value = REG_SET_FIELD(reg_value, GCEA_ERR_STATUS,
CLEAR_ERROR_STATUS, 0x1);
WREG32(SOC15_REG_ENTRY_OFFSET(gfx_v9_4_2_ea_err_status_regs), reg_value);
}
}
@@ -1772,7 +1776,7 @@ static void gfx_v9_4_2_query_utc_err_status(struct amdgpu_device *adev)
}
}
void gfx_v9_4_2_query_ras_error_status(struct amdgpu_device *adev)
static void gfx_v9_4_2_query_ras_error_status(struct amdgpu_device *adev)
{
if (!amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__GFX))
return;
@@ -1782,7 +1786,7 @@ void gfx_v9_4_2_query_ras_error_status(struct amdgpu_device *adev)
gfx_v9_4_2_query_sq_timeout_status(adev);
}
void gfx_v9_4_2_reset_ras_error_status(struct amdgpu_device *adev)
static void gfx_v9_4_2_reset_ras_error_status(struct amdgpu_device *adev)
{
if (!amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__GFX))
return;
@@ -1792,7 +1796,7 @@ void gfx_v9_4_2_reset_ras_error_status(struct amdgpu_device *adev)
gfx_v9_4_2_reset_sq_timeout_status(adev);
}
void gfx_v9_4_2_enable_watchdog_timer(struct amdgpu_device *adev)
static void gfx_v9_4_2_enable_watchdog_timer(struct amdgpu_device *adev)
{
uint32_t i;
uint32_t data;

View File

@@ -229,6 +229,10 @@ static void gmc_v10_0_flush_vm_hub(struct amdgpu_device *adev, uint32_t vmid,
/* Use register 17 for GART */
const unsigned eng = 17;
unsigned int i;
unsigned char hub_ip = 0;
hub_ip = (vmhub == AMDGPU_GFXHUB_0) ?
GC_HWIP : MMHUB_HWIP;
spin_lock(&adev->gmc.invalidate_lock);
/*
@@ -242,8 +246,9 @@ static void gmc_v10_0_flush_vm_hub(struct amdgpu_device *adev, uint32_t vmid,
if (use_semaphore) {
for (i = 0; i < adev->usec_timeout; i++) {
/* a read return value of 1 means semaphore acquire */
tmp = RREG32_NO_KIQ(hub->vm_inv_eng0_sem +
hub->eng_distance * eng);
tmp = RREG32_RLC_NO_KIQ(hub->vm_inv_eng0_sem +
hub->eng_distance * eng, hub_ip);
if (tmp & 0x1)
break;
udelay(1);
@@ -253,7 +258,9 @@ static void gmc_v10_0_flush_vm_hub(struct amdgpu_device *adev, uint32_t vmid,
DRM_ERROR("Timeout waiting for sem acquire in VM flush!\n");
}
WREG32_NO_KIQ(hub->vm_inv_eng0_req + hub->eng_distance * eng, inv_req);
WREG32_RLC_NO_KIQ(hub->vm_inv_eng0_req +
hub->eng_distance * eng,
inv_req, hub_ip);
/*
* Issue a dummy read to wait for the ACK register to be cleared
@@ -261,12 +268,14 @@ static void gmc_v10_0_flush_vm_hub(struct amdgpu_device *adev, uint32_t vmid,
*/
if ((vmhub == AMDGPU_GFXHUB_0) &&
(adev->asic_type < CHIP_SIENNA_CICHLID))
RREG32_NO_KIQ(hub->vm_inv_eng0_req + hub->eng_distance * eng);
RREG32_RLC_NO_KIQ(hub->vm_inv_eng0_req +
hub->eng_distance * eng, hub_ip);
/* Wait for ACK with a delay. */
for (i = 0; i < adev->usec_timeout; i++) {
tmp = RREG32_NO_KIQ(hub->vm_inv_eng0_ack +
hub->eng_distance * eng);
tmp = RREG32_RLC_NO_KIQ(hub->vm_inv_eng0_ack +
hub->eng_distance * eng, hub_ip);
tmp &= 1 << vmid;
if (tmp)
break;
@@ -280,8 +289,8 @@ static void gmc_v10_0_flush_vm_hub(struct amdgpu_device *adev, uint32_t vmid,
* add semaphore release after invalidation,
* write with 0 means semaphore release
*/
WREG32_NO_KIQ(hub->vm_inv_eng0_sem +
hub->eng_distance * eng, 0);
WREG32_RLC_NO_KIQ(hub->vm_inv_eng0_sem +
hub->eng_distance * eng, 0, hub_ip);
spin_unlock(&adev->gmc.invalidate_lock);
@@ -947,7 +956,7 @@ static int gmc_v10_0_sw_init(void *handle)
}
/**
* gmc_v8_0_gart_fini - vm fini callback
* gmc_v10_0_gart_fini - vm fini callback
*
* @adev: amdgpu_device pointer
*

View File

@@ -516,7 +516,7 @@ static void gmc_v7_0_get_vm_pte(struct amdgpu_device *adev,
}
/**
* gmc_v8_0_set_fault_enable_default - update VM fault handling
* gmc_v7_0_set_fault_enable_default - update VM fault handling
*
* @adev: amdgpu_device pointer
* @value: true redirects VM faults to the default page

View File

@@ -172,6 +172,8 @@ static int jpeg_v2_0_hw_fini(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
cancel_delayed_work_sync(&adev->vcn.idle_work);
if (adev->jpeg.cur_state != AMD_PG_STATE_GATE &&
RREG32_SOC15(JPEG, 0, mmUVD_JRBC_STATUS))
jpeg_v2_0_set_powergating_state(adev, AMD_PG_STATE_GATE);

View File

@@ -187,14 +187,14 @@ static int jpeg_v2_5_hw_init(void *handle)
static int jpeg_v2_5_hw_fini(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
struct amdgpu_ring *ring;
int i;
cancel_delayed_work_sync(&adev->vcn.idle_work);
for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i) {
if (adev->jpeg.harvest_config & (1 << i))
continue;
ring = &adev->jpeg.inst[i].ring_dec;
if (adev->jpeg.cur_state != AMD_PG_STATE_GATE &&
RREG32_SOC15(JPEG, i, mmUVD_JRBC_STATUS))
jpeg_v2_5_set_powergating_state(adev, AMD_PG_STATE_GATE);

View File

@@ -159,9 +159,9 @@ static int jpeg_v3_0_hw_init(void *handle)
static int jpeg_v3_0_hw_fini(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
struct amdgpu_ring *ring;
ring = &adev->jpeg.inst->ring_dec;
cancel_delayed_work_sync(&adev->vcn.idle_work);
if (adev->jpeg.cur_state != AMD_PG_STATE_GATE &&
RREG32_SOC15(JPEG, 0, mmUVD_JRBC_STATUS))
jpeg_v3_0_set_powergating_state(adev, AMD_PG_STATE_GATE);

View File

@@ -29,6 +29,7 @@
#include "mmhub/mmhub_2_0_0_default.h"
#include "navi10_enum.h"
#include "gc/gc_10_1_0_offset.h"
#include "soc15_common.h"
#define mmMM_ATC_L2_MISC_CG_Sienna_Cichlid 0x064d
@@ -192,11 +193,11 @@ static void mmhub_v2_0_setup_vm_pt_regs(struct amdgpu_device *adev, uint32_t vmi
{
struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB_0];
WREG32_SOC15_OFFSET(MMHUB, 0, mmMMVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32,
WREG32_SOC15_OFFSET_RLC(MMHUB, 0, mmMMVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32,
hub->ctx_addr_distance * vmid,
lower_32_bits(page_table_base));
WREG32_SOC15_OFFSET(MMHUB, 0, mmMMVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32,
WREG32_SOC15_OFFSET_RLC(MMHUB, 0, mmMMVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32,
hub->ctx_addr_distance * vmid,
upper_32_bits(page_table_base));
}
@@ -207,14 +208,14 @@ static void mmhub_v2_0_init_gart_aperture_regs(struct amdgpu_device *adev)
mmhub_v2_0_setup_vm_pt_regs(adev, 0, pt_base);
WREG32_SOC15(MMHUB, 0, mmMMVM_CONTEXT0_PAGE_TABLE_START_ADDR_LO32,
WREG32_SOC15_RLC(MMHUB, 0, mmMMVM_CONTEXT0_PAGE_TABLE_START_ADDR_LO32,
(u32)(adev->gmc.gart_start >> 12));
WREG32_SOC15(MMHUB, 0, mmMMVM_CONTEXT0_PAGE_TABLE_START_ADDR_HI32,
WREG32_SOC15_RLC(MMHUB, 0, mmMMVM_CONTEXT0_PAGE_TABLE_START_ADDR_HI32,
(u32)(adev->gmc.gart_start >> 44));
WREG32_SOC15(MMHUB, 0, mmMMVM_CONTEXT0_PAGE_TABLE_END_ADDR_LO32,
WREG32_SOC15_RLC(MMHUB, 0, mmMMVM_CONTEXT0_PAGE_TABLE_END_ADDR_LO32,
(u32)(adev->gmc.gart_end >> 12));
WREG32_SOC15(MMHUB, 0, mmMMVM_CONTEXT0_PAGE_TABLE_END_ADDR_HI32,
WREG32_SOC15_RLC(MMHUB, 0, mmMMVM_CONTEXT0_PAGE_TABLE_END_ADDR_HI32,
(u32)(adev->gmc.gart_end >> 44));
}
@@ -223,12 +224,12 @@ static void mmhub_v2_0_init_system_aperture_regs(struct amdgpu_device *adev)
uint64_t value;
uint32_t tmp;
/* Program the AGP BAR */
WREG32_SOC15(MMHUB, 0, mmMMMC_VM_AGP_BASE, 0);
WREG32_SOC15(MMHUB, 0, mmMMMC_VM_AGP_BOT, adev->gmc.agp_start >> 24);
WREG32_SOC15(MMHUB, 0, mmMMMC_VM_AGP_TOP, adev->gmc.agp_end >> 24);
if (!amdgpu_sriov_vf(adev)) {
/* Program the AGP BAR */
WREG32_SOC15_RLC(MMHUB, 0, mmMMMC_VM_AGP_BASE, 0);
WREG32_SOC15_RLC(MMHUB, 0, mmMMMC_VM_AGP_BOT, adev->gmc.agp_start >> 24);
WREG32_SOC15_RLC(MMHUB, 0, mmMMMC_VM_AGP_TOP, adev->gmc.agp_end >> 24);
/* Program the system aperture low logical page number. */
WREG32_SOC15(MMHUB, 0, mmMMMC_VM_SYSTEM_APERTURE_LOW_ADDR,
min(adev->gmc.fb_start, adev->gmc.agp_start) >> 18);
@@ -335,7 +336,7 @@ static void mmhub_v2_0_enable_system_domain(struct amdgpu_device *adev)
tmp = REG_SET_FIELD(tmp, MMVM_CONTEXT0_CNTL, PAGE_TABLE_DEPTH, 0);
tmp = REG_SET_FIELD(tmp, MMVM_CONTEXT0_CNTL,
RETRY_PERMISSION_OR_INVALID_PAGE_FAULT, 0);
WREG32_SOC15(MMHUB, 0, mmMMVM_CONTEXT0_CNTL, tmp);
WREG32_SOC15_RLC(MMHUB, 0, mmMMVM_CONTEXT0_CNTL, tmp);
}
static void mmhub_v2_0_disable_identity_aperture(struct amdgpu_device *adev)
@@ -397,16 +398,16 @@ static void mmhub_v2_0_setup_vmid_config(struct amdgpu_device *adev)
tmp = REG_SET_FIELD(tmp, MMVM_CONTEXT1_CNTL,
RETRY_PERMISSION_OR_INVALID_PAGE_FAULT,
!adev->gmc.noretry);
WREG32_SOC15_OFFSET(MMHUB, 0, mmMMVM_CONTEXT1_CNTL,
WREG32_SOC15_OFFSET_RLC(MMHUB, 0, mmMMVM_CONTEXT1_CNTL,
i * hub->ctx_distance, tmp);
WREG32_SOC15_OFFSET(MMHUB, 0, mmMMVM_CONTEXT1_PAGE_TABLE_START_ADDR_LO32,
WREG32_SOC15_OFFSET_RLC(MMHUB, 0, mmMMVM_CONTEXT1_PAGE_TABLE_START_ADDR_LO32,
i * hub->ctx_addr_distance, 0);
WREG32_SOC15_OFFSET(MMHUB, 0, mmMMVM_CONTEXT1_PAGE_TABLE_START_ADDR_HI32,
WREG32_SOC15_OFFSET_RLC(MMHUB, 0, mmMMVM_CONTEXT1_PAGE_TABLE_START_ADDR_HI32,
i * hub->ctx_addr_distance, 0);
WREG32_SOC15_OFFSET(MMHUB, 0, mmMMVM_CONTEXT1_PAGE_TABLE_END_ADDR_LO32,
WREG32_SOC15_OFFSET_RLC(MMHUB, 0, mmMMVM_CONTEXT1_PAGE_TABLE_END_ADDR_LO32,
i * hub->ctx_addr_distance,
lower_32_bits(adev->vm_manager.max_pfn - 1));
WREG32_SOC15_OFFSET(MMHUB, 0, mmMMVM_CONTEXT1_PAGE_TABLE_END_ADDR_HI32,
WREG32_SOC15_OFFSET_RLC(MMHUB, 0, mmMMVM_CONTEXT1_PAGE_TABLE_END_ADDR_HI32,
i * hub->ctx_addr_distance,
upper_32_bits(adev->vm_manager.max_pfn - 1));
}
@@ -418,9 +419,9 @@ static void mmhub_v2_0_program_invalidation(struct amdgpu_device *adev)
unsigned i;
for (i = 0; i < 18; ++i) {
WREG32_SOC15_OFFSET(MMHUB, 0, mmMMVM_INVALIDATE_ENG0_ADDR_RANGE_LO32,
WREG32_SOC15_OFFSET_RLC(MMHUB, 0, mmMMVM_INVALIDATE_ENG0_ADDR_RANGE_LO32,
i * hub->eng_addr_distance, 0xffffffff);
WREG32_SOC15_OFFSET(MMHUB, 0, mmMMVM_INVALIDATE_ENG0_ADDR_RANGE_HI32,
WREG32_SOC15_OFFSET_RLC(MMHUB, 0, mmMMVM_INVALIDATE_ENG0_ADDR_RANGE_HI32,
i * hub->eng_addr_distance, 0x1f);
}
}
@@ -449,7 +450,7 @@ static void mmhub_v2_0_gart_disable(struct amdgpu_device *adev)
/* Disable all tables */
for (i = 0; i < AMDGPU_NUM_VMID; i++)
WREG32_SOC15_OFFSET(MMHUB, 0, mmMMVM_CONTEXT0_CNTL,
WREG32_SOC15_OFFSET_RLC(MMHUB, 0, mmMMVM_CONTEXT0_CNTL,
i * hub->ctx_distance, 0);
/* Setup TLB control */

View File

@@ -436,7 +436,7 @@ static void mmhub_v9_4_gart_disable(struct amdgpu_device *adev)
}
/**
* mmhub_v1_0_set_fault_enable_default - update GART/VM fault handling
* mmhub_v9_4_set_fault_enable_default - update GART/VM fault handling
*
* @adev: amdgpu_device pointer
* @value: true redirects VM faults to the default page

View File

@@ -466,7 +466,7 @@ void nv_grbm_select(struct amdgpu_device *adev,
grbm_gfx_cntl = REG_SET_FIELD(grbm_gfx_cntl, GRBM_GFX_CNTL, VMID, vmid);
grbm_gfx_cntl = REG_SET_FIELD(grbm_gfx_cntl, GRBM_GFX_CNTL, QUEUEID, queue);
WREG32(SOC15_REG_OFFSET(GC, 0, mmGRBM_GFX_CNTL), grbm_gfx_cntl);
WREG32_SOC15(GC, 0, mmGRBM_GFX_CNTL, grbm_gfx_cntl);
}
static void nv_vga_set_state(struct amdgpu_device *adev, bool state)
@@ -849,8 +849,13 @@ int nv_set_ip_blocks(struct amdgpu_device *adev)
case CHIP_NAVI12:
amdgpu_device_ip_block_add(adev, &nv_common_ip_block);
amdgpu_device_ip_block_add(adev, &gmc_v10_0_ip_block);
amdgpu_device_ip_block_add(adev, &navi10_ih_ip_block);
amdgpu_device_ip_block_add(adev, &psp_v11_0_ip_block);
if (!amdgpu_sriov_vf(adev)) {
amdgpu_device_ip_block_add(adev, &navi10_ih_ip_block);
amdgpu_device_ip_block_add(adev, &psp_v11_0_ip_block);
} else {
amdgpu_device_ip_block_add(adev, &psp_v11_0_ip_block);
amdgpu_device_ip_block_add(adev, &navi10_ih_ip_block);
}
if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP)
amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block);
if (adev->enable_virtual_display || amdgpu_sriov_vf(adev))

View File

@@ -733,7 +733,7 @@ static uint32_t psp_v11_0_ring_get_wptr(struct psp_context *psp)
struct amdgpu_device *adev = psp->adev;
if (amdgpu_sriov_vf(adev))
data = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_102);
data = psp->km_ring.ring_wptr;
else
data = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_67);
@@ -747,6 +747,7 @@ static void psp_v11_0_ring_set_wptr(struct psp_context *psp, uint32_t value)
if (amdgpu_sriov_vf(adev)) {
WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_102, value);
WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_101, GFX_CTRL_CMD_ID_CONSUME_CMD);
psp->km_ring.ring_wptr = value;
} else
WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_67, value);
}
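Under SR-IOV the C2PMSG_102 write doubles as a doorbell that the PSP firmware consumes, so reading the register back no longer returns what was written; the driver therefore shadows the last value in psp->km_ring.ring_wptr (added to struct psp_ring earlier in this series). The shadow-register pattern in miniature (the model below is invented):
#include <stdio.h>
#include <stdint.h>
struct demo_ring {
	uint32_t shadow_wptr;	/* last value software wrote */
	uint32_t hw_reg;	/* stands in for C2PMSG_102 */
};
static void ring_set_wptr(struct demo_ring *r, uint32_t v)
{
	r->hw_reg = v;		/* doorbell write; firmware may consume it */
	r->shadow_wptr = v;	/* remember what we wrote */
}
static uint32_t ring_get_wptr(struct demo_ring *r)
{
	return r->shadow_wptr;	/* never trust the live register */
}
int main(void)
{
	struct demo_ring r = {0};
	ring_set_wptr(&r, 64);
	r.hw_reg = 0;		/* simulate the firmware clearing it */
	printf("wptr = %u\n", ring_get_wptr(&r));	/* still 64 */
	return 0;
}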

View File

@@ -375,7 +375,7 @@ static uint32_t psp_v3_1_ring_get_wptr(struct psp_context *psp)
struct amdgpu_device *adev = psp->adev;
if (amdgpu_sriov_vf(adev))
data = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_102);
data = psp->km_ring.ring_wptr;
else
data = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_67);
return data;
@@ -390,6 +390,7 @@ static void psp_v3_1_ring_set_wptr(struct psp_context *psp, uint32_t value)
/* send interrupt to PSP for SRIOV ring write pointer update */
WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_101,
GFX_CTRL_CMD_ID_CONSUME_CMD);
psp->km_ring.ring_wptr = value;
} else
WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_67, value);
}

View File

@@ -271,7 +271,7 @@ static void sdma_v2_4_ring_emit_ib(struct amdgpu_ring *ring,
}
/**
* sdma_v2_4_hdp_flush_ring_emit - emit an hdp flush on the DMA ring
* sdma_v2_4_ring_emit_hdp_flush - emit an hdp flush on the DMA ring
*
* @ring: amdgpu ring pointer
*

View File

@@ -754,7 +754,7 @@ static uint64_t sdma_v4_0_ring_get_wptr(struct amdgpu_ring *ring)
}
/**
* sdma_v4_0_page_ring_set_wptr - commit the write pointer
* sdma_v4_0_ring_set_wptr - commit the write pointer
*
* @ring: amdgpu ring pointer
*
@@ -820,7 +820,7 @@ static uint64_t sdma_v4_0_page_ring_get_wptr(struct amdgpu_ring *ring)
}
/**
* sdma_v4_0_ring_set_wptr - commit the write pointer
* sdma_v4_0_page_ring_set_wptr - commit the write pointer
*
* @ring: amdgpu ring pointer
*

View File

@@ -328,9 +328,9 @@ static uint64_t sdma_v5_0_ring_get_wptr(struct amdgpu_ring *ring)
wptr = READ_ONCE(*((u64 *)&adev->wb.wb[ring->wptr_offs]));
DRM_DEBUG("wptr/doorbell before shift == 0x%016llx\n", wptr);
} else {
wptr = RREG32(sdma_v5_0_get_reg_offset(adev, ring->me, mmSDMA0_GFX_RB_WPTR_HI));
wptr = RREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, ring->me, mmSDMA0_GFX_RB_WPTR_HI));
wptr = wptr << 32;
wptr |= RREG32(sdma_v5_0_get_reg_offset(adev, ring->me, mmSDMA0_GFX_RB_WPTR));
wptr |= RREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, ring->me, mmSDMA0_GFX_RB_WPTR));
DRM_DEBUG("wptr before shift [%i] wptr == 0x%016llx\n", ring->me, wptr);
}
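
For reference, the register path above assembles the 64-bit write pointer from two 32-bit reads; a small standalone sketch of the HI/LO composition:

#include <stdint.h>
#include <stdio.h>

/* Assemble a 64-bit write pointer from separate HI/LO 32-bit reads,
 * as the register path of sdma_v5_0_ring_get_wptr() does. */
static uint64_t compose_wptr(uint32_t hi, uint32_t lo)
{
	uint64_t wptr = hi;
	wptr = wptr << 32;   /* HI word into the upper half */
	wptr |= lo;          /* then OR in the LO word */
	return wptr;
}

int main(void)
{
	printf("0x%016llx\n",
	       (unsigned long long)compose_wptr(0x00000001u, 0x02345678u));
	return 0;
}
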
@@ -371,9 +371,9 @@ static void sdma_v5_0_ring_set_wptr(struct amdgpu_ring *ring)
lower_32_bits(ring->wptr << 2),
ring->me,
upper_32_bits(ring->wptr << 2));
WREG32(sdma_v5_0_get_reg_offset(adev, ring->me, mmSDMA0_GFX_RB_WPTR),
WREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, ring->me, mmSDMA0_GFX_RB_WPTR),
lower_32_bits(ring->wptr << 2));
WREG32(sdma_v5_0_get_reg_offset(adev, ring->me, mmSDMA0_GFX_RB_WPTR_HI),
WREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, ring->me, mmSDMA0_GFX_RB_WPTR_HI),
upper_32_bits(ring->wptr << 2));
}
}
@@ -440,20 +440,19 @@ static void sdma_v5_0_ring_emit_ib(struct amdgpu_ring *ring,
*/
static void sdma_v5_0_ring_emit_mem_sync(struct amdgpu_ring *ring)
{
uint32_t gcr_cntl =
SDMA_GCR_GL2_INV | SDMA_GCR_GL2_WB | SDMA_GCR_GLM_INV |
SDMA_GCR_GL1_INV | SDMA_GCR_GLV_INV | SDMA_GCR_GLK_INV |
SDMA_GCR_GLI_INV(1);
uint32_t gcr_cntl = SDMA_GCR_GL2_INV | SDMA_GCR_GL2_WB | SDMA_GCR_GLM_INV |
SDMA_GCR_GL1_INV | SDMA_GCR_GLV_INV | SDMA_GCR_GLK_INV |
SDMA_GCR_GLI_INV(1);
/* flush entire cache L0/L1/L2, this can be optimized by performance requirement */
amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_GCR_REQ));
amdgpu_ring_write(ring, SDMA_PKT_GCR_REQ_PAYLOAD1_BASE_VA_31_7(0));
amdgpu_ring_write(ring, SDMA_PKT_GCR_REQ_PAYLOAD2_GCR_CONTROL_15_0(gcr_cntl) |
SDMA_PKT_GCR_REQ_PAYLOAD2_BASE_VA_47_32(0));
SDMA_PKT_GCR_REQ_PAYLOAD2_BASE_VA_47_32(0));
amdgpu_ring_write(ring, SDMA_PKT_GCR_REQ_PAYLOAD3_LIMIT_VA_31_7(0) |
SDMA_PKT_GCR_REQ_PAYLOAD3_GCR_CONTROL_18_16(gcr_cntl >> 16));
SDMA_PKT_GCR_REQ_PAYLOAD3_GCR_CONTROL_18_16(gcr_cntl >> 16));
amdgpu_ring_write(ring, SDMA_PKT_GCR_REQ_PAYLOAD4_LIMIT_VA_47_32(0) |
SDMA_PKT_GCR_REQ_PAYLOAD4_VMID(0));
SDMA_PKT_GCR_REQ_PAYLOAD4_VMID(0));
}
/**
@@ -549,12 +548,12 @@ static void sdma_v5_0_gfx_stop(struct amdgpu_device *adev)
amdgpu_ttm_set_buffer_funcs_status(adev, false);
for (i = 0; i < adev->sdma.num_instances; i++) {
rb_cntl = RREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_CNTL));
rb_cntl = RREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_CNTL));
rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RB_ENABLE, 0);
WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_CNTL), rb_cntl);
ib_cntl = RREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_IB_CNTL));
WREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_CNTL), rb_cntl);
ib_cntl = RREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_IB_CNTL));
ib_cntl = REG_SET_FIELD(ib_cntl, SDMA0_GFX_IB_CNTL, IB_ENABLE, 0);
WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_IB_CNTL), ib_cntl);
WREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_IB_CNTL), ib_cntl);
}
}
@@ -571,7 +570,7 @@ static void sdma_v5_0_rlc_stop(struct amdgpu_device *adev)
}
/**
* sdma_v_0_ctx_switch_enable - stop the async dma engines context switch
* sdma_v5_0_ctx_switch_enable - stop the async dma engines context switch
*
* @adev: amdgpu_device pointer
* @enable: enable/disable the DMA MEs context switch.
@@ -615,11 +614,11 @@ static void sdma_v5_0_ctx_switch_enable(struct amdgpu_device *adev, bool enable)
}
if (enable && amdgpu_sdma_phase_quantum) {
WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_PHASE0_QUANTUM),
WREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_PHASE0_QUANTUM),
phase_quantum);
WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_PHASE1_QUANTUM),
WREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_PHASE1_QUANTUM),
phase_quantum);
WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_PHASE2_QUANTUM),
WREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_PHASE2_QUANTUM),
phase_quantum);
}
if (!amdgpu_sriov_vf(adev))
@@ -686,58 +685,63 @@ static int sdma_v5_0_gfx_resume(struct amdgpu_device *adev)
/* Set ring buffer size in dwords */
rb_bufsz = order_base_2(ring->ring_size / 4);
rb_cntl = RREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_CNTL));
rb_cntl = RREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_CNTL));
rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RB_SIZE, rb_bufsz);
#ifdef __BIG_ENDIAN
rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RB_SWAP_ENABLE, 1);
rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL,
RPTR_WRITEBACK_SWAP_ENABLE, 1);
#endif
WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_CNTL), rb_cntl);
WREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_CNTL), rb_cntl);
/* Initialize the ring buffer's read and write pointers */
WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_RPTR), 0);
WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_RPTR_HI), 0);
WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_WPTR), 0);
WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_WPTR_HI), 0);
WREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_RPTR), 0);
WREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_RPTR_HI), 0);
WREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_WPTR), 0);
WREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_WPTR_HI), 0);
/* setup the wptr shadow polling */
wptr_gpu_addr = adev->wb.gpu_addr + (ring->wptr_offs * 4);
WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_WPTR_POLL_ADDR_LO),
WREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_WPTR_POLL_ADDR_LO),
lower_32_bits(wptr_gpu_addr));
WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_WPTR_POLL_ADDR_HI),
WREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_WPTR_POLL_ADDR_HI),
upper_32_bits(wptr_gpu_addr));
wptr_poll_cntl = RREG32(sdma_v5_0_get_reg_offset(adev, i,
wptr_poll_cntl = RREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i,
mmSDMA0_GFX_RB_WPTR_POLL_CNTL));
wptr_poll_cntl = REG_SET_FIELD(wptr_poll_cntl,
SDMA0_GFX_RB_WPTR_POLL_CNTL,
F32_POLL_ENABLE, 1);
WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_WPTR_POLL_CNTL),
WREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_WPTR_POLL_CNTL),
wptr_poll_cntl);
/* set the wb address whether it's enabled or not */
WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_RPTR_ADDR_HI),
WREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_RPTR_ADDR_HI),
upper_32_bits(adev->wb.gpu_addr + wb_offset) & 0xFFFFFFFF);
WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_RPTR_ADDR_LO),
WREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_RPTR_ADDR_LO),
lower_32_bits(adev->wb.gpu_addr + wb_offset) & 0xFFFFFFFC);
rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RPTR_WRITEBACK_ENABLE, 1);
WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_BASE), ring->gpu_addr >> 8);
WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_BASE_HI), ring->gpu_addr >> 40);
WREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_BASE),
ring->gpu_addr >> 8);
WREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_BASE_HI),
ring->gpu_addr >> 40);
ring->wptr = 0;
/* before programing wptr to a less value, need set minor_ptr_update first */
WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_MINOR_PTR_UPDATE), 1);
WREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_MINOR_PTR_UPDATE), 1);
if (!amdgpu_sriov_vf(adev)) { /* only bare-metal use register write for wptr */
WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_WPTR), lower_32_bits(ring->wptr) << 2);
WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_WPTR_HI), upper_32_bits(ring->wptr) << 2);
WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_WPTR),
lower_32_bits(ring->wptr) << 2);
WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_WPTR_HI),
upper_32_bits(ring->wptr) << 2);
}
doorbell = RREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_DOORBELL));
doorbell_offset = RREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_DOORBELL_OFFSET));
doorbell = RREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_DOORBELL));
doorbell_offset = RREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i,
mmSDMA0_GFX_DOORBELL_OFFSET));
if (ring->use_doorbell) {
doorbell = REG_SET_FIELD(doorbell, SDMA0_GFX_DOORBELL, ENABLE, 1);
@@ -746,8 +750,9 @@ static int sdma_v5_0_gfx_resume(struct amdgpu_device *adev)
} else {
doorbell = REG_SET_FIELD(doorbell, SDMA0_GFX_DOORBELL, ENABLE, 0);
}
WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_DOORBELL), doorbell);
WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_DOORBELL_OFFSET), doorbell_offset);
WREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_DOORBELL), doorbell);
WREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_DOORBELL_OFFSET),
doorbell_offset);
adev->nbio.funcs->sdma_doorbell_range(adev, i, ring->use_doorbell,
ring->doorbell_index, 20);
@@ -756,7 +761,7 @@ static int sdma_v5_0_gfx_resume(struct amdgpu_device *adev)
sdma_v5_0_ring_set_wptr(ring);
/* set minor_ptr_update to 0 after wptr programed */
WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_MINOR_PTR_UPDATE), 0);
WREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_MINOR_PTR_UPDATE), 0);
if (!amdgpu_sriov_vf(adev)) {
/* set utc l1 enable flag always to 1 */
@@ -790,15 +795,15 @@ static int sdma_v5_0_gfx_resume(struct amdgpu_device *adev)
/* enable DMA RB */
rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RB_ENABLE, 1);
WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_CNTL), rb_cntl);
WREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_CNTL), rb_cntl);
ib_cntl = RREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_IB_CNTL));
ib_cntl = RREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_IB_CNTL));
ib_cntl = REG_SET_FIELD(ib_cntl, SDMA0_GFX_IB_CNTL, IB_ENABLE, 1);
#ifdef __BIG_ENDIAN
ib_cntl = REG_SET_FIELD(ib_cntl, SDMA0_GFX_IB_CNTL, IB_SWAP_ENABLE, 1);
#endif
/* enable DMA IBs */
WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_IB_CNTL), ib_cntl);
WREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_IB_CNTL), ib_cntl);
ring->sched.ready = true;
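
Throughout this file, accesses to offsets that sdma_v5_0_get_reg_offset() computes at runtime move to the new *_SOC15_IP macros: unlike RREG32_SOC15(ip, inst, reg), which builds the offset itself, the _IP form accepts a precomputed offset while still carrying the IP tag the SR-IOV routing needs. A sketch of that shape (reg_read and the stride are illustrative, not the kernel's):

#include <stdint.h>
#include <stdio.h>

#define HWIP_GC 1

/* The hwip tag travels with the access so a VF can decide whether the
 * offset falls in a range that must be proxied through the RLC. */
static uint32_t reg_read(uint32_t offset, int hwip)
{
	printf("read 0x%05x (hwip %d)\n", offset, hwip);
	return 0;
}

#define RREG32_SOC15_IP(hwip, offset) reg_read((offset), (hwip))

/* Offset computed at runtime from the instance number -- this is why the
 * (ip, inst, reg) form of the macro cannot be used at these call sites. */
static uint32_t sdma_get_reg_offset(int instance, uint32_t reg)
{
	return 0x1200 * instance + reg;   /* illustrative base + stride only */
}

int main(void)
{
	uint32_t rb_cntl = RREG32_SOC15_IP(HWIP_GC, sdma_get_reg_offset(1, 0x80));
	(void)rb_cntl;
	return 0;
}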

View File

@@ -147,9 +147,6 @@ static int sdma_v5_2_init_microcode(struct amdgpu_device *adev)
struct amdgpu_firmware_info *info = NULL;
const struct common_firmware_header *header = NULL;
if (amdgpu_sriov_vf(adev) && (adev->asic_type == CHIP_SIENNA_CICHLID))
return 0;
DRM_DEBUG("\n");
switch (adev->asic_type) {
@@ -187,6 +184,9 @@ static int sdma_v5_2_init_microcode(struct amdgpu_device *adev)
(void *)&adev->sdma.instance[0],
sizeof(struct amdgpu_sdma_instance));
if (amdgpu_sriov_vf(adev) && (adev->asic_type == CHIP_SIENNA_CICHLID))
return 0;
DRM_DEBUG("psp_load == '%s'\n",
adev->firmware.load_type == AMDGPU_FW_LOAD_PSP ? "true" : "false");
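
The two hunks above move the SR-IOV/Sienna Cichlid early return from before the firmware request to after the per-instance memcpy, plausibly so the VF path still populates every adev->sdma.instance[] slot before skipping the PSP loader bookkeeping. A toy model of the reordered flow (all names are stand-ins):

#include <string.h>
#include <stdio.h>

struct sdma_instance { char fw_name[32]; };

/* Model of the reordering: even when the SR-IOV path skips the PSP
 * firmware bookkeeping, the per-instance data is populated first. */
static int init_microcode(struct sdma_instance *inst, int n, int sriov_vf)
{
	int i;

	strcpy(inst[0].fw_name, "sdma.bin");           /* request_firmware step */
	for (i = 1; i < n; i++)                        /* duplicate instance 0 */
		memcpy(&inst[i], &inst[0], sizeof(inst[0]));

	if (sriov_vf)         /* early return now happens AFTER the memcpy */
		return 0;

	printf("register firmware with PSP loader\n"); /* skipped on the VF */
	return 0;
}

int main(void)
{
	struct sdma_instance inst[4];
	return init_microcode(inst, 4, 1);
}
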
@@ -517,7 +517,7 @@ static void sdma_v5_2_rlc_stop(struct amdgpu_device *adev)
}
/**
* sdma_v_0_ctx_switch_enable - stop the async dma engines context switch
* sdma_v5_2_ctx_switch_enable - stop the async dma engines context switch
*
* @adev: amdgpu_device pointer
* @enable: enable/disable the DMA MEs context switch.

View File

@@ -305,7 +305,7 @@ err0:
}
/**
* cik_dma_vm_copy_pte - update PTEs by copying them from the GART
* si_dma_vm_copy_pte - update PTEs by copying them from the GART
*
* @ib: indirect buffer to fill with commands
* @pe: addr of the page entry
@@ -402,7 +402,7 @@ static void si_dma_vm_set_pte_pde(struct amdgpu_ib *ib,
}
/**
* si_dma_pad_ib - pad the IB to the required number of dw
* si_dma_ring_pad_ib - pad the IB to the required number of dw
*
* @ring: amdgpu_ring pointer
* @ib: indirect buffer to fill with padding
@@ -415,7 +415,7 @@ static void si_dma_ring_pad_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib)
}
/**
* cik_sdma_ring_emit_pipeline_sync - sync the pipeline
* si_dma_ring_emit_pipeline_sync - sync the pipeline
*
* @ring: amdgpu_ring pointer
*

View File

@@ -106,7 +106,7 @@ static u32 smuio_v13_0_get_socket_id(struct amdgpu_device *adev)
}
/**
* smuio_v13_0_supports_host_gpu_xgmi - detect xgmi interface between cpu and gpu/s.
* smuio_v13_0_is_host_gpu_xgmi_supported - detect xgmi interface between cpu and gpu/s.
*
* @adev: amdgpu device pointer
*

View File

@@ -633,7 +633,9 @@ void soc15_program_register_sequence(struct amdgpu_device *adev,
if (entry->and_mask == 0xffffffff) {
tmp = entry->or_mask;
} else {
tmp = RREG32(reg);
tmp = (entry->hwip == GC_HWIP) ?
RREG32_SOC15_IP(GC, reg) : RREG32(reg);
tmp &= ~(entry->and_mask);
tmp |= (entry->or_mask & entry->and_mask);
}
@@ -644,7 +646,8 @@ void soc15_program_register_sequence(struct amdgpu_device *adev,
reg == SOC15_REG_OFFSET(GC, 0, mmSH_MEM_CONFIG))
WREG32_RLC(reg, tmp);
else
WREG32(reg, tmp);
(entry->hwip == GC_HWIP) ?
WREG32_SOC15_IP(GC, reg, tmp) : WREG32(reg, tmp);
}
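
soc15_program_register_sequence() now routes GC accesses through the _IP wrappers so a VF takes the RLC path; the read-modify-write of each golden entry is otherwise unchanged. A compilable model of that RMW step (struct reduced to the three fields used here):

#include <stdint.h>

/* Read-modify-write of one golden-register entry, as in
 * soc15_program_register_sequence(): and_mask selects the bits to
 * replace, or_mask supplies their new values. A full-ones and_mask
 * degenerates into a plain write, skipping the read. */
struct reg_golden {
	uint32_t reg;
	uint32_t and_mask;
	uint32_t or_mask;
};

static uint32_t regs[256];

static void program_entry(const struct reg_golden *e)
{
	uint32_t tmp;

	if (e->and_mask == 0xffffffff) {
		tmp = e->or_mask;              /* whole register replaced */
	} else {
		tmp = regs[e->reg];            /* RREG32 / RREG32_SOC15_IP */
		tmp &= ~e->and_mask;           /* clear the masked field */
		tmp |= (e->or_mask & e->and_mask);
	}
	regs[e->reg] = tmp;                    /* WREG32 / WREG32_SOC15_IP */
}

int main(void)
{
	struct reg_golden e = { .reg = 7, .and_mask = 0x0000ff00, .or_mask = 0x00001200 };

	regs[7] = 0xdeadbeef;
	program_entry(&e);
	return regs[7] == 0xdead12ef ? 0 : 1;   /* only the masked byte changed */
}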

View File

@@ -27,28 +27,51 @@
/* Register Access Macros */
#define SOC15_REG_OFFSET(ip, inst, reg) (adev->reg_offset[ip##_HWIP][inst][reg##_BASE_IDX] + reg)
#define __WREG32_SOC15_RLC__(reg, value, flag, hwip) \
((amdgpu_sriov_vf(adev) && adev->gfx.rlc.funcs->rlcg_wreg) ? \
adev->gfx.rlc.funcs->rlcg_wreg(adev, reg, value, flag, hwip) : \
WREG32(reg, value))
#define __RREG32_SOC15_RLC__(reg, flag, hwip) \
((amdgpu_sriov_vf(adev) && adev->gfx.rlc.funcs->rlcg_rreg) ? \
adev->gfx.rlc.funcs->rlcg_rreg(adev, reg, flag, hwip) : \
RREG32(reg))
#define WREG32_FIELD15(ip, idx, reg, field, val) \
WREG32(adev->reg_offset[ip##_HWIP][idx][mm##reg##_BASE_IDX] + mm##reg, \
(RREG32(adev->reg_offset[ip##_HWIP][idx][mm##reg##_BASE_IDX] + mm##reg) \
& ~REG_FIELD_MASK(reg, field)) | (val) << REG_FIELD_SHIFT(reg, field))
__WREG32_SOC15_RLC__(adev->reg_offset[ip##_HWIP][idx][mm##reg##_BASE_IDX] + mm##reg, \
(__RREG32_SOC15_RLC__( \
adev->reg_offset[ip##_HWIP][idx][mm##reg##_BASE_IDX] + mm##reg, \
0, ip##_HWIP) & \
~REG_FIELD_MASK(reg, field)) | (val) << REG_FIELD_SHIFT(reg, field), \
0, ip##_HWIP)
#define RREG32_SOC15(ip, inst, reg) \
RREG32(adev->reg_offset[ip##_HWIP][inst][reg##_BASE_IDX] + reg)
__RREG32_SOC15_RLC__(adev->reg_offset[ip##_HWIP][inst][reg##_BASE_IDX] + reg, \
0, ip##_HWIP)
#define RREG32_SOC15_IP(ip, reg) __RREG32_SOC15_RLC__(reg, 0, ip##_HWIP)
#define RREG32_SOC15_NO_KIQ(ip, inst, reg) \
RREG32_NO_KIQ(adev->reg_offset[ip##_HWIP][inst][reg##_BASE_IDX] + reg)
__RREG32_SOC15_RLC__(adev->reg_offset[ip##_HWIP][inst][reg##_BASE_IDX] + reg, \
AMDGPU_REGS_NO_KIQ, ip##_HWIP)
#define RREG32_SOC15_OFFSET(ip, inst, reg, offset) \
RREG32((adev->reg_offset[ip##_HWIP][inst][reg##_BASE_IDX] + reg) + offset)
__RREG32_SOC15_RLC__((adev->reg_offset[ip##_HWIP][inst][reg##_BASE_IDX] + reg) + offset, 0, ip##_HWIP)
#define WREG32_SOC15(ip, inst, reg, value) \
WREG32((adev->reg_offset[ip##_HWIP][inst][reg##_BASE_IDX] + reg), value)
__WREG32_SOC15_RLC__((adev->reg_offset[ip##_HWIP][inst][reg##_BASE_IDX] + reg), \
value, 0, ip##_HWIP)
#define WREG32_SOC15_IP(ip, reg, value) \
__WREG32_SOC15_RLC__(reg, value, 0, ip##_HWIP)
#define WREG32_SOC15_NO_KIQ(ip, inst, reg, value) \
WREG32_NO_KIQ((adev->reg_offset[ip##_HWIP][inst][reg##_BASE_IDX] + reg), value)
__WREG32_SOC15_RLC__(adev->reg_offset[ip##_HWIP][inst][reg##_BASE_IDX] + reg, \
value, AMDGPU_REGS_NO_KIQ, ip##_HWIP)
#define WREG32_SOC15_OFFSET(ip, inst, reg, offset, value) \
WREG32((adev->reg_offset[ip##_HWIP][inst][reg##_BASE_IDX] + reg) + offset, value)
__WREG32_SOC15_RLC__((adev->reg_offset[ip##_HWIP][inst][reg##_BASE_IDX] + reg) + offset, \
value, 0, ip##_HWIP)
#define SOC15_WAIT_ON_RREG(ip, inst, reg, expected_value, mask) \
({ int ret = 0; \
@@ -77,12 +100,7 @@
})
#define WREG32_RLC(reg, value) \
do { \
if (adev->gfx.rlc.funcs->rlcg_wreg) \
adev->gfx.rlc.funcs->rlcg_wreg(adev, reg, value, 0); \
else \
WREG32(reg, value); \
} while (0)
__WREG32_SOC15_RLC__(reg, value, AMDGPU_REGS_RLC, GC_HWIP)
#define WREG32_RLC_EX(prefix, reg, value) \
do { \
@@ -108,24 +126,19 @@
} \
} while (0)
/* shadow the registers in the callback function */
#define WREG32_SOC15_RLC_SHADOW(ip, inst, reg, value) \
WREG32_RLC((adev->reg_offset[ip##_HWIP][inst][reg##_BASE_IDX] + reg), value)
__WREG32_SOC15_RLC__((adev->reg_offset[ip##_HWIP][inst][reg##_BASE_IDX] + reg), value, AMDGPU_REGS_RLC, GC_HWIP)
/* for GC only */
#define RREG32_RLC(reg) \
(adev->gfx.rlc.funcs->rlcg_rreg ? \
adev->gfx.rlc.funcs->rlcg_rreg(adev, reg, 0) : RREG32(reg))
__RREG32_SOC15_RLC__(reg, AMDGPU_REGS_RLC, GC_HWIP)
#define WREG32_RLC_NO_KIQ(reg, value) \
do { \
if (adev->gfx.rlc.funcs->rlcg_wreg) \
adev->gfx.rlc.funcs->rlcg_wreg(adev, reg, value, AMDGPU_REGS_NO_KIQ); \
else \
WREG32_NO_KIQ(reg, value); \
} while (0)
#define WREG32_RLC_NO_KIQ(reg, value, hwip) \
__WREG32_SOC15_RLC__(reg, value, AMDGPU_REGS_NO_KIQ | AMDGPU_REGS_RLC, hwip)
#define RREG32_RLC_NO_KIQ(reg) \
(adev->gfx.rlc.funcs->rlcg_rreg ? \
adev->gfx.rlc.funcs->rlcg_rreg(adev, reg, AMDGPU_REGS_NO_KIQ) : RREG32_NO_KIQ(reg))
#define RREG32_RLC_NO_KIQ(reg, hwip) \
__RREG32_SOC15_RLC__(reg, AMDGPU_REGS_NO_KIQ | AMDGPU_REGS_RLC, hwip)
#define WREG32_SOC15_RLC_SHADOW_EX(prefix, ip, inst, reg, value) \
do { \
@@ -146,12 +159,12 @@
} while (0)
#define RREG32_SOC15_RLC(ip, inst, reg) \
RREG32_RLC(adev->reg_offset[ip##_HWIP][inst][reg##_BASE_IDX] + reg)
__RREG32_SOC15_RLC__(adev->reg_offset[ip##_HWIP][inst][reg##_BASE_IDX] + reg, AMDGPU_REGS_RLC, ip##_HWIP)
#define WREG32_SOC15_RLC(ip, inst, reg, value) \
do { \
uint32_t target_reg = adev->reg_offset[ip##_HWIP][0][reg##_BASE_IDX] + reg;\
WREG32_RLC(target_reg, value); \
__WREG32_SOC15_RLC__(target_reg, value, AMDGPU_REGS_RLC, ip##_HWIP); \
} while (0)
#define WREG32_SOC15_RLC_EX(prefix, ip, inst, reg, value) \
@@ -161,14 +174,16 @@
} while (0)
#define WREG32_FIELD15_RLC(ip, idx, reg, field, val) \
WREG32_RLC((adev->reg_offset[ip##_HWIP][idx][mm##reg##_BASE_IDX] + mm##reg), \
(RREG32_RLC(adev->reg_offset[ip##_HWIP][idx][mm##reg##_BASE_IDX] + mm##reg) \
& ~REG_FIELD_MASK(reg, field)) | (val) << REG_FIELD_SHIFT(reg, field))
__WREG32_SOC15_RLC__((adev->reg_offset[ip##_HWIP][idx][mm##reg##_BASE_IDX] + mm##reg), \
(__RREG32_SOC15_RLC__(adev->reg_offset[ip##_HWIP][idx][mm##reg##_BASE_IDX] + mm##reg, \
AMDGPU_REGS_RLC, ip##_HWIP) & \
~REG_FIELD_MASK(reg, field)) | (val) << REG_FIELD_SHIFT(reg, field), \
AMDGPU_REGS_RLC, ip##_HWIP)
#define WREG32_SOC15_OFFSET_RLC(ip, inst, reg, offset, value) \
WREG32_RLC(((adev->reg_offset[ip##_HWIP][inst][reg##_BASE_IDX] + reg) + offset), value)
__WREG32_SOC15_RLC__((adev->reg_offset[ip##_HWIP][inst][reg##_BASE_IDX] + reg) + offset, value, AMDGPU_REGS_RLC, ip##_HWIP)
#define RREG32_SOC15_OFFSET_RLC(ip, inst, reg, offset) \
RREG32_RLC(((adev->reg_offset[ip##_HWIP][inst][reg##_BASE_IDX] + reg) + offset))
__RREG32_SOC15_RLC__((adev->reg_offset[ip##_HWIP][inst][reg##_BASE_IDX] + reg) + offset, AMDGPU_REGS_RLC, ip##_HWIP)
#endif
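
The net effect of this header rework is that every SOC15 access funnels into the two __{R,W}REG32_SOC15_RLC__ helpers, where the per-access flags plus the hwip tag decide, at a single point, whether to use the SR-IOV RLC gateway. A compilable model of the funnel (flag values mirror the diff; the gateway body is invented for illustration):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define AMDGPU_REGS_NO_KIQ (1 << 0)
#define AMDGPU_REGS_RLC    (1 << 1)
#define GC_HWIP 1

static bool sriov_vf = true;         /* stand-in for amdgpu_sriov_vf(adev) */
static uint32_t mmio[0x10000];

/* single choke point: every SOC15 write lands here */
static void wreg32_soc15_rlc(uint32_t reg, uint32_t v, uint32_t flag, int hwip)
{
	if (sriov_vf) {
		/* the real rlcg_wreg() proxies via scratch registers; here we
		 * just log the routing decision the flags encode */
		printf("rlcg: reg=0x%05x flag=0x%x hwip=%d\n", reg, flag, hwip);
	}
	mmio[reg] = v;
}

#define WREG32_RLC(reg, value) \
	wreg32_soc15_rlc((reg), (value), AMDGPU_REGS_RLC, GC_HWIP)
#define WREG32_RLC_NO_KIQ(reg, value, hwip) \
	wreg32_soc15_rlc((reg), (value), AMDGPU_REGS_NO_KIQ | AMDGPU_REGS_RLC, (hwip))

int main(void)
{
	WREG32_RLC(0x2644, 0x1);                  /* old call sites unchanged */
	WREG32_RLC_NO_KIQ(0x2644, 0x2, GC_HWIP);  /* note the new hwip argument */
	return 0;
}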

View File

@@ -340,7 +340,7 @@ static int uvd_v3_1_start(struct amdgpu_device *adev)
/* enable VCPU clock */
WREG32(mmUVD_VCPU_CNTL, 1 << 9);
/* disable interupt */
/* disable interrupt */
WREG32_P(mmUVD_MASTINT_EN, 0, ~(1 << 1));
#ifdef __BIG_ENDIAN
@@ -405,7 +405,7 @@ static int uvd_v3_1_start(struct amdgpu_device *adev)
return r;
}
/* enable interupt */
/* enable interrupt */
WREG32_P(mmUVD_MASTINT_EN, 3<<1, ~(3 << 1));
WREG32_P(mmUVD_STATUS, 0, ~(1<<2));

View File

@@ -357,6 +357,7 @@ static int uvd_v6_0_enc_ring_test_ib(struct amdgpu_ring *ring, long timeout)
error:
dma_fence_put(fence);
amdgpu_bo_unpin(bo);
amdgpu_bo_unreserve(bo);
amdgpu_bo_unref(&bo);
return r;
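
The uvd_v6_0 fix above inserts the missing amdgpu_bo_unreserve() between unpin and unref: reservation (the BO lock), pin count and refcount are independent states and have to be unwound in reverse order, or the final unref would drop a still-locked object. A toy model of that ordering (the asserts encode the assumed invariants, not amdgpu's exact locking rules):

#include <assert.h>
#include <stdbool.h>
#include <stddef.h>

/* Toy BO with the three independent states unwound by the fix:
 * reference held, reservation (lock) held, pin count. */
struct bo {
	int refcount;
	bool reserved;
	int pin_count;
};

static void bo_unpin(struct bo *b)      { assert(b->reserved); b->pin_count--; }
static void bo_unreserve(struct bo *b)  { assert(b->reserved); b->reserved = false; }
static void bo_unref(struct bo **b)     { assert(!(*b)->reserved); (*b)->refcount--; *b = NULL; }

int main(void)
{
	struct bo obj = { .refcount = 1, .reserved = true, .pin_count = 1 };
	struct bo *bo = &obj;

	bo_unpin(bo);        /* needs the reservation still held */
	bo_unreserve(bo);    /* the call the fix adds: drop the lock ... */
	bo_unref(&bo);       /* ... before the final reference goes away */
	return 0;
}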

View File

@@ -231,9 +231,13 @@ static int vcn_v1_0_hw_fini(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
cancel_delayed_work_sync(&adev->vcn.idle_work);
if ((adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) ||
RREG32_SOC15(VCN, 0, mmUVD_STATUS))
(adev->vcn.cur_state != AMD_PG_STATE_GATE &&
RREG32_SOC15(VCN, 0, mmUVD_STATUS))) {
vcn_v1_0_set_powergating_state(adev, AMD_PG_STATE_GATE);
}
return 0;
}
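
The VCN hw_fini fixes (v1.0 here, v2.0/v2.5/v3.0 below) first cancel the idle worker synchronously, then force the GATE state only when DPG is in use or the block is still ungated and busy; checking cur_state before UVD_STATUS avoids register reads on an already-gated block. A condensed standalone model:

#include <stdbool.h>
#include <stdio.h>

enum pg_state { PG_STATE_UNGATE, PG_STATE_GATE };

/* Condensed model of the reworked hw_fini condition: gate on teardown if
 * DPG is supported, or if the block is still ungated and busy. */
static bool dpg_supported = false;
static enum pg_state cur_state = PG_STATE_GATE;

static unsigned int read_uvd_status(void)
{
	printf("UVD_STATUS read\n");   /* must not happen when already gated */
	return 0;
}

static void hw_fini(void)
{
	/* cancel_delayed_work_sync(&idle_work) comes first in the driver */
	if (dpg_supported ||
	    (cur_state != PG_STATE_GATE && read_uvd_status()))
		cur_state = PG_STATE_GATE;  /* set_powergating_state(GATE) */
}

int main(void)
{
	hw_fini();   /* already gated: no status read, no double gating */
	return 0;
}
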
@@ -765,7 +769,7 @@ static void vcn_1_0_enable_static_power_gating(struct amdgpu_device *adev)
}
/**
* vcn_v1_0_start - start VCN block
* vcn_v1_0_start_spg_mode - start VCN block
*
* @adev: amdgpu_device pointer
*
@@ -1101,7 +1105,7 @@ static int vcn_v1_0_start(struct amdgpu_device *adev)
}
/**
* vcn_v1_0_stop - stop VCN block
* vcn_v1_0_stop_spg_mode - stop VCN block
*
* @adev: amdgpu_device pointer
*

View File

@@ -262,6 +262,8 @@ static int vcn_v2_0_hw_fini(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
cancel_delayed_work_sync(&adev->vcn.idle_work);
if ((adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) ||
(adev->vcn.cur_state != AMD_PG_STATE_GATE &&
RREG32_SOC15(VCN, 0, mmUVD_STATUS)))

View File

@@ -321,6 +321,8 @@ static int vcn_v2_5_hw_fini(void *handle)
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
int i;
cancel_delayed_work_sync(&adev->vcn.idle_work);
for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
if (adev->vcn.harvest_config & (1 << i))
continue;

View File

@@ -386,15 +386,14 @@ done:
static int vcn_v3_0_hw_fini(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
struct amdgpu_ring *ring;
int i;
cancel_delayed_work_sync(&adev->vcn.idle_work);
for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
if (adev->vcn.harvest_config & (1 << i))
continue;
ring = &adev->vcn.inst[i].ring_dec;
if (!amdgpu_sriov_vf(adev)) {
if ((adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) ||
(adev->vcn.cur_state != AMD_PG_STATE_GATE &&