mirror of
https://git.kernel.org/pub/scm/linux/kernel/git/herbert/cryptodev-2.6.git
synced 2026-04-18 19:43:43 -04:00
While applying post-migration fixups to VF, GuC will not respond to any commands. This means submissions have no way of finishing. To avoid acquiring additional resources and then stalling on hardware access, pause the submission work. This will decrease the chance of depleting resources, and speed up the recovery. v2: Commented xe_irq_resume() call v3: Typo fix Signed-off-by: Tomasz Lis <tomasz.lis@intel.com> Cc: Michal Wajdeczko <michal.wajdeczko@intel.com> Cc: Michal Winiarski <michal.winiarski@intel.com> Reviewed-by: Michał Winiarski <michal.winiarski@intel.com> Link: https://lore.kernel.org/r/20250802031045.1127138-3-tomasz.lis@intel.com Signed-off-by: Michał Winiarski <michal.winiarski@intel.com>
105 lines
2.8 KiB
C
105 lines
2.8 KiB
C
/* SPDX-License-Identifier: MIT */
|
|
/*
|
|
* Copyright © 2023 Intel Corporation
|
|
*/
|
|
|
|
#ifndef _XE_GPU_SCHEDULER_H_
|
|
#define _XE_GPU_SCHEDULER_H_
|
|
|
|
#include "xe_gpu_scheduler_types.h"
|
|
#include "xe_sched_job_types.h"
|
|
|
|
int xe_sched_init(struct xe_gpu_scheduler *sched,
|
|
const struct drm_sched_backend_ops *ops,
|
|
const struct xe_sched_backend_ops *xe_ops,
|
|
struct workqueue_struct *submit_wq,
|
|
uint32_t hw_submission, unsigned hang_limit,
|
|
long timeout, struct workqueue_struct *timeout_wq,
|
|
atomic_t *score, const char *name,
|
|
struct device *dev);
|
|
void xe_sched_fini(struct xe_gpu_scheduler *sched);
|
|
|
|
void xe_sched_submission_start(struct xe_gpu_scheduler *sched);
|
|
void xe_sched_submission_stop(struct xe_gpu_scheduler *sched);
|
|
void xe_sched_submission_stop_async(struct xe_gpu_scheduler *sched);
|
|
|
|
void xe_sched_submission_resume_tdr(struct xe_gpu_scheduler *sched);
|
|
|
|
void xe_sched_add_msg(struct xe_gpu_scheduler *sched,
|
|
struct xe_sched_msg *msg);
|
|
void xe_sched_add_msg_locked(struct xe_gpu_scheduler *sched,
|
|
struct xe_sched_msg *msg);
|
|
|
|
static inline void xe_sched_msg_lock(struct xe_gpu_scheduler *sched)
|
|
{
|
|
spin_lock(&sched->base.job_list_lock);
|
|
}
|
|
|
|
static inline void xe_sched_msg_unlock(struct xe_gpu_scheduler *sched)
|
|
{
|
|
spin_unlock(&sched->base.job_list_lock);
|
|
}
|
|
|
|
static inline void xe_sched_stop(struct xe_gpu_scheduler *sched)
|
|
{
|
|
drm_sched_stop(&sched->base, NULL);
|
|
}
|
|
|
|
static inline void xe_sched_tdr_queue_imm(struct xe_gpu_scheduler *sched)
|
|
{
|
|
drm_sched_tdr_queue_imm(&sched->base);
|
|
}
|
|
|
|
static inline void xe_sched_resubmit_jobs(struct xe_gpu_scheduler *sched)
|
|
{
|
|
struct drm_sched_job *s_job;
|
|
|
|
list_for_each_entry(s_job, &sched->base.pending_list, list) {
|
|
struct drm_sched_fence *s_fence = s_job->s_fence;
|
|
struct dma_fence *hw_fence = s_fence->parent;
|
|
|
|
if (hw_fence && !dma_fence_is_signaled(hw_fence))
|
|
sched->base.ops->run_job(s_job);
|
|
}
|
|
}
|
|
|
|
static inline bool
|
|
xe_sched_invalidate_job(struct xe_sched_job *job, int threshold)
|
|
{
|
|
return drm_sched_invalidate_job(&job->drm, threshold);
|
|
}
|
|
|
|
static inline void xe_sched_add_pending_job(struct xe_gpu_scheduler *sched,
|
|
struct xe_sched_job *job)
|
|
{
|
|
spin_lock(&sched->base.job_list_lock);
|
|
list_add(&job->drm.list, &sched->base.pending_list);
|
|
spin_unlock(&sched->base.job_list_lock);
|
|
}
|
|
|
|
static inline
|
|
struct xe_sched_job *xe_sched_first_pending_job(struct xe_gpu_scheduler *sched)
|
|
{
|
|
struct xe_sched_job *job;
|
|
|
|
spin_lock(&sched->base.job_list_lock);
|
|
job = list_first_entry_or_null(&sched->base.pending_list,
|
|
struct xe_sched_job, drm.list);
|
|
spin_unlock(&sched->base.job_list_lock);
|
|
|
|
return job;
|
|
}
|
|
|
|
static inline int
|
|
xe_sched_entity_init(struct xe_sched_entity *entity,
|
|
struct xe_gpu_scheduler *sched)
|
|
{
|
|
return drm_sched_entity_init(entity, 0,
|
|
(struct drm_gpu_scheduler **)&sched,
|
|
1, NULL);
|
|
}
|
|
|
|
#define xe_sched_entity_fini drm_sched_entity_fini
|
|
|
|
#endif
|