mirror of
https://git.kernel.org/pub/scm/linux/kernel/git/netdev/net.git/
synced 2026-04-18 06:33:43 -04:00
drm/amdkfd: Introduce kfd_node struct (v5)
Introduce a new structure, kfd_node, which will now represent a compute node. kfd_node is carved out of kfd_dev structure. kfd_dev struct now will become the parent of kfd_node, and will store common resources such as doorbells, GTT sub-allocator etc. kfd_node struct will store all resources specific to a compute node, such as device queue manager, interrupt handling etc. This is the first step in adding compute partition support in KFD. v2: introduce kfd_node struct to gc v11 (Hawking) v3: make reference to kfd_dev struct through kfd_node (Morris) v4: use kfd_node instead for kfd isr/mqd functions (Morris) v5: rebase (Alex) Signed-off-by: Mukul Joshi <mukul.joshi@amd.com> Tested-by: Amber Lin <Amber.Lin@amd.com> Reviewed-by: Felix Kuehling <Felix.Kuehling@amd.com> Signed-off-by: Hawking Zhang <Hawking.Zhang@amd.com> Signed-off-by: Morris Zhang <Shiwu.Zhang@amd.com> Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
This commit is contained in:
committed by
Alex Deucher
parent
5cf1675591
commit
8dc1db3172
@@ -50,29 +50,29 @@
|
||||
|
||||
static void interrupt_wq(struct work_struct *);
|
||||
|
||||
int kfd_interrupt_init(struct kfd_dev *kfd)
|
||||
int kfd_interrupt_init(struct kfd_node *node)
|
||||
{
|
||||
int r;
|
||||
|
||||
r = kfifo_alloc(&kfd->ih_fifo,
|
||||
KFD_IH_NUM_ENTRIES * kfd->device_info.ih_ring_entry_size,
|
||||
r = kfifo_alloc(&node->ih_fifo,
|
||||
KFD_IH_NUM_ENTRIES * node->kfd->device_info.ih_ring_entry_size,
|
||||
GFP_KERNEL);
|
||||
if (r) {
|
||||
dev_err(kfd->adev->dev, "Failed to allocate IH fifo\n");
|
||||
dev_err(node->adev->dev, "Failed to allocate IH fifo\n");
|
||||
return r;
|
||||
}
|
||||
|
||||
kfd->ih_wq = alloc_workqueue("KFD IH", WQ_HIGHPRI, 1);
|
||||
if (unlikely(!kfd->ih_wq)) {
|
||||
kfifo_free(&kfd->ih_fifo);
|
||||
dev_err(kfd->adev->dev, "Failed to allocate KFD IH workqueue\n");
|
||||
node->ih_wq = alloc_workqueue("KFD IH", WQ_HIGHPRI, 1);
|
||||
if (unlikely(!node->ih_wq)) {
|
||||
kfifo_free(&node->ih_fifo);
|
||||
dev_err(node->adev->dev, "Failed to allocate KFD IH workqueue\n");
|
||||
return -ENOMEM;
|
||||
}
|
||||
spin_lock_init(&kfd->interrupt_lock);
|
||||
spin_lock_init(&node->interrupt_lock);
|
||||
|
||||
INIT_WORK(&kfd->interrupt_work, interrupt_wq);
|
||||
INIT_WORK(&node->interrupt_work, interrupt_wq);
|
||||
|
||||
kfd->interrupts_active = true;
|
||||
node->interrupts_active = true;
|
||||
|
||||
/*
|
||||
* After this function returns, the interrupt will be enabled. This
|
||||
@@ -84,7 +84,7 @@ int kfd_interrupt_init(struct kfd_dev *kfd)
|
||||
return 0;
|
||||
}
|
||||
|
||||
void kfd_interrupt_exit(struct kfd_dev *kfd)
|
||||
void kfd_interrupt_exit(struct kfd_node *node)
|
||||
{
|
||||
/*
|
||||
* Stop the interrupt handler from writing to the ring and scheduling
|
||||
@@ -93,31 +93,31 @@ void kfd_interrupt_exit(struct kfd_dev *kfd)
|
||||
*/
|
||||
unsigned long flags;
|
||||
|
||||
spin_lock_irqsave(&kfd->interrupt_lock, flags);
|
||||
kfd->interrupts_active = false;
|
||||
spin_unlock_irqrestore(&kfd->interrupt_lock, flags);
|
||||
spin_lock_irqsave(&node->interrupt_lock, flags);
|
||||
node->interrupts_active = false;
|
||||
spin_unlock_irqrestore(&node->interrupt_lock, flags);
|
||||
|
||||
/*
|
||||
* flush_work ensures that there are no outstanding
|
||||
* work-queue items that will access interrupt_ring. New work items
|
||||
* can't be created because we stopped interrupt handling above.
|
||||
*/
|
||||
flush_workqueue(kfd->ih_wq);
|
||||
flush_workqueue(node->ih_wq);
|
||||
|
||||
kfifo_free(&kfd->ih_fifo);
|
||||
kfifo_free(&node->ih_fifo);
|
||||
}
|
||||
|
||||
/*
|
||||
* Assumption: single reader/writer. This function is not re-entrant
|
||||
*/
|
||||
bool enqueue_ih_ring_entry(struct kfd_dev *kfd, const void *ih_ring_entry)
|
||||
bool enqueue_ih_ring_entry(struct kfd_node *node, const void *ih_ring_entry)
|
||||
{
|
||||
int count;
|
||||
|
||||
count = kfifo_in(&kfd->ih_fifo, ih_ring_entry,
|
||||
kfd->device_info.ih_ring_entry_size);
|
||||
if (count != kfd->device_info.ih_ring_entry_size) {
|
||||
dev_dbg_ratelimited(kfd->adev->dev,
|
||||
count = kfifo_in(&node->ih_fifo, ih_ring_entry,
|
||||
node->kfd->device_info.ih_ring_entry_size);
|
||||
if (count != node->kfd->device_info.ih_ring_entry_size) {
|
||||
dev_dbg_ratelimited(node->adev->dev,
|
||||
"Interrupt ring overflow, dropping interrupt %d\n",
|
||||
count);
|
||||
return false;
|
||||
@@ -129,32 +129,32 @@ bool enqueue_ih_ring_entry(struct kfd_dev *kfd, const void *ih_ring_entry)
|
||||
/*
|
||||
* Assumption: single reader/writer. This function is not re-entrant
|
||||
*/
|
||||
static bool dequeue_ih_ring_entry(struct kfd_dev *kfd, void *ih_ring_entry)
|
||||
static bool dequeue_ih_ring_entry(struct kfd_node *node, void *ih_ring_entry)
|
||||
{
|
||||
int count;
|
||||
|
||||
count = kfifo_out(&kfd->ih_fifo, ih_ring_entry,
|
||||
kfd->device_info.ih_ring_entry_size);
|
||||
count = kfifo_out(&node->ih_fifo, ih_ring_entry,
|
||||
node->kfd->device_info.ih_ring_entry_size);
|
||||
|
||||
WARN_ON(count && count != kfd->device_info.ih_ring_entry_size);
|
||||
WARN_ON(count && count != node->kfd->device_info.ih_ring_entry_size);
|
||||
|
||||
return count == kfd->device_info.ih_ring_entry_size;
|
||||
return count == node->kfd->device_info.ih_ring_entry_size;
|
||||
}
|
||||
|
||||
static void interrupt_wq(struct work_struct *work)
|
||||
{
|
||||
struct kfd_dev *dev = container_of(work, struct kfd_dev,
|
||||
struct kfd_node *dev = container_of(work, struct kfd_node,
|
||||
interrupt_work);
|
||||
uint32_t ih_ring_entry[KFD_MAX_RING_ENTRY_SIZE];
|
||||
unsigned long start_jiffies = jiffies;
|
||||
|
||||
if (dev->device_info.ih_ring_entry_size > sizeof(ih_ring_entry)) {
|
||||
if (dev->kfd->device_info.ih_ring_entry_size > sizeof(ih_ring_entry)) {
|
||||
dev_err_once(dev->adev->dev, "Ring entry too small\n");
|
||||
return;
|
||||
}
|
||||
|
||||
while (dequeue_ih_ring_entry(dev, ih_ring_entry)) {
|
||||
dev->device_info.event_interrupt_class->interrupt_wq(dev,
|
||||
dev->kfd->device_info.event_interrupt_class->interrupt_wq(dev,
|
||||
ih_ring_entry);
|
||||
if (time_is_before_jiffies(start_jiffies + HZ)) {
|
||||
/* If we spent more than a second processing signals,
|
||||
@@ -166,14 +166,14 @@ static void interrupt_wq(struct work_struct *work)
|
||||
}
|
||||
}
|
||||
|
||||
bool interrupt_is_wanted(struct kfd_dev *dev,
|
||||
bool interrupt_is_wanted(struct kfd_node *dev,
|
||||
const uint32_t *ih_ring_entry,
|
||||
uint32_t *patched_ihre, bool *flag)
|
||||
{
|
||||
/* integer and bitwise OR so there is no boolean short-circuiting */
|
||||
unsigned int wanted = 0;
|
||||
|
||||
wanted |= dev->device_info.event_interrupt_class->interrupt_isr(dev,
|
||||
wanted |= dev->kfd->device_info.event_interrupt_class->interrupt_isr(dev,
|
||||
ih_ring_entry, patched_ihre, flag);
|
||||
|
||||
return wanted != 0;
|
||||
|
||||
Reference in New Issue
Block a user