Revert "binder: switch alloc->mutex to spinlock_t"

This reverts commit 7710e2cca3.

In preparation for concurrent page installations, restore the original
alloc->mutex, which will serialize zap_page_range_single() against page
installations in subsequent patches (instead of the mmap_sem).
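
For context, zap_page_range_single() can sleep (that was the subject of
commit 2c10a20f5e cited below), so only a sleepable lock can be held
across the zap. The following is an illustration only, not part of this
commit: a minimal sketch of where the series is heading, assuming a
hypothetical later caller that has already looked up the vma and
page_addr:

	/* hypothetical later caller: alloc->mutex excludes concurrent
	 * page installs while the page is zapped; this would be invalid
	 * under a spinlock since zap_page_range_single() can sleep
	 */
	mutex_lock(&alloc->mutex);
	zap_page_range_single(vma, page_addr, PAGE_SIZE, NULL);
	mutex_unlock(&alloc->mutex);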

Resolved trivial conflicts with commit 2c10a20f5e ("binder_alloc: Fix
sleeping function called from invalid context") and commit da0c02516c
("mm/list_lru: simplify the list_lru walk callback function").

Cc: Mukesh Ojha <quic_mojha@quicinc.com>
Reviewed-by: Suren Baghdasaryan <surenb@google.com>
Signed-off-by: Carlos Llamas <cmllamas@google.com>
Link: https://lore.kernel.org/r/20241210143114.661252-2-cmllamas@google.com
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>

--- a/drivers/android/binder_alloc.c
+++ b/drivers/android/binder_alloc.c
@@ -169,9 +169,9 @@ struct binder_buffer *binder_alloc_prepare_to_free(struct binder_alloc *alloc,
 {
 	struct binder_buffer *buffer;
 
-	spin_lock(&alloc->lock);
+	mutex_lock(&alloc->mutex);
 	buffer = binder_alloc_prepare_to_free_locked(alloc, user_ptr);
-	spin_unlock(&alloc->lock);
+	mutex_unlock(&alloc->mutex);
 	return buffer;
 }
 
@@ -597,10 +597,10 @@ struct binder_buffer *binder_alloc_new_buf(struct binder_alloc *alloc,
 	if (!next)
 		return ERR_PTR(-ENOMEM);
 
-	spin_lock(&alloc->lock);
+	mutex_lock(&alloc->mutex);
 	buffer = binder_alloc_new_buf_locked(alloc, next, size, is_async);
 	if (IS_ERR(buffer)) {
-		spin_unlock(&alloc->lock);
+		mutex_unlock(&alloc->mutex);
 		goto out;
 	}
 
@@ -608,7 +608,7 @@ struct binder_buffer *binder_alloc_new_buf(struct binder_alloc *alloc,
 	buffer->offsets_size = offsets_size;
 	buffer->extra_buffers_size = extra_buffers_size;
 	buffer->pid = current->tgid;
-	spin_unlock(&alloc->lock);
+	mutex_unlock(&alloc->mutex);
 
 	ret = binder_install_buffer_pages(alloc, buffer, size);
 	if (ret) {
@@ -785,17 +785,17 @@ void binder_alloc_free_buf(struct binder_alloc *alloc,
 	 * We could eliminate the call to binder_alloc_clear_buf()
 	 * from binder_alloc_deferred_release() by moving this to
 	 * binder_free_buf_locked(). However, that could
-	 * increase contention for the alloc->lock if clear_on_free
-	 * is used frequently for large buffers. This lock is not
+	 * increase contention for the alloc mutex if clear_on_free
+	 * is used frequently for large buffers. The mutex is not
 	 * needed for correctness here.
 	 */
 	if (buffer->clear_on_free) {
 		binder_alloc_clear_buf(alloc, buffer);
 		buffer->clear_on_free = false;
 	}
-	spin_lock(&alloc->lock);
+	mutex_lock(&alloc->mutex);
 	binder_free_buf_locked(alloc, buffer);
-	spin_unlock(&alloc->lock);
+	mutex_unlock(&alloc->mutex);
 }
 
 /**
@@ -893,7 +893,7 @@ void binder_alloc_deferred_release(struct binder_alloc *alloc)
 	struct binder_buffer *buffer;
 
 	buffers = 0;
-	spin_lock(&alloc->lock);
+	mutex_lock(&alloc->mutex);
 	BUG_ON(alloc->vma);
 
 	while ((n = rb_first(&alloc->allocated_buffers))) {
@@ -940,7 +940,7 @@ void binder_alloc_deferred_release(struct binder_alloc *alloc)
 			page_count++;
 		}
 	}
-	spin_unlock(&alloc->lock);
+	mutex_unlock(&alloc->mutex);
 	kvfree(alloc->pages);
 	if (alloc->mm)
 		mmdrop(alloc->mm);
@@ -964,7 +964,7 @@ void binder_alloc_print_allocated(struct seq_file *m,
 	struct binder_buffer *buffer;
 	struct rb_node *n;
 
-	spin_lock(&alloc->lock);
+	mutex_lock(&alloc->mutex);
 	for (n = rb_first(&alloc->allocated_buffers); n; n = rb_next(n)) {
 		buffer = rb_entry(n, struct binder_buffer, rb_node);
 		seq_printf(m, "  buffer %d: %lx size %zd:%zd:%zd %s\n",
@@ -974,7 +974,7 @@ void binder_alloc_print_allocated(struct seq_file *m,
 			   buffer->extra_buffers_size,
 			   buffer->transaction ? "active" : "delivered");
 	}
-	spin_unlock(&alloc->lock);
+	mutex_unlock(&alloc->mutex);
 }
 
 /**
@@ -991,7 +991,7 @@ void binder_alloc_print_pages(struct seq_file *m,
 	int lru = 0;
 	int free = 0;
 
-	spin_lock(&alloc->lock);
+	mutex_lock(&alloc->mutex);
 	/*
 	 * Make sure the binder_alloc is fully initialized, otherwise we might
 	 * read inconsistent state.
@@ -1007,7 +1007,7 @@ void binder_alloc_print_pages(struct seq_file *m,
 				lru++;
 		}
 	}
-	spin_unlock(&alloc->lock);
+	mutex_unlock(&alloc->mutex);
 	seq_printf(m, "  pages: %d:%d:%d\n", active, lru, free);
 	seq_printf(m, "  pages high watermark: %zu\n", alloc->pages_high);
 }
@@ -1023,10 +1023,10 @@ int binder_alloc_get_allocated_count(struct binder_alloc *alloc)
 	struct rb_node *n;
 	int count = 0;
 
-	spin_lock(&alloc->lock);
+	mutex_lock(&alloc->mutex);
 	for (n = rb_first(&alloc->allocated_buffers); n != NULL; n = rb_next(n))
 		count++;
-	spin_unlock(&alloc->lock);
+	mutex_unlock(&alloc->mutex);
 	return count;
 }
 
@@ -1070,8 +1070,8 @@ enum lru_status binder_alloc_free_page(struct list_head *item,
 		goto err_mmget;
 	if (!mmap_read_trylock(mm))
 		goto err_mmap_read_lock_failed;
-	if (!spin_trylock(&alloc->lock))
-		goto err_get_alloc_lock_failed;
+	if (!mutex_trylock(&alloc->mutex))
+		goto err_get_alloc_mutex_failed;
 	if (!page->page_ptr)
 		goto err_page_already_freed;
 
@@ -1090,7 +1090,7 @@ enum lru_status binder_alloc_free_page(struct list_head *item,
 	trace_binder_unmap_kernel_end(alloc, index);
 
 	list_lru_isolate(lru, item);
-	spin_unlock(&alloc->lock);
+	mutex_unlock(&alloc->mutex);
 	spin_unlock(&lru->lock);
 
 	if (vma) {
@@ -1109,8 +1109,8 @@ enum lru_status binder_alloc_free_page(struct list_head *item,
 
 err_invalid_vma:
 err_page_already_freed:
-	spin_unlock(&alloc->lock);
-err_get_alloc_lock_failed:
+	mutex_unlock(&alloc->mutex);
+err_get_alloc_mutex_failed:
 	mmap_read_unlock(mm);
 err_mmap_read_lock_failed:
 	mmput_async(mm);
@@ -1145,7 +1145,7 @@ void binder_alloc_init(struct binder_alloc *alloc)
 	alloc->pid = current->group_leader->pid;
 	alloc->mm = current->mm;
 	mmgrab(alloc->mm);
-	spin_lock_init(&alloc->lock);
+	mutex_init(&alloc->mutex);
 	INIT_LIST_HEAD(&alloc->buffers);
 }
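
A note on the shrinker path above (commentary, not part of the commit):
binder_alloc_free_page() is entered with lru->lock, a spinlock, already
held, so it must not sleep waiting for alloc->mutex. mutex_trylock()
never sleeps, which makes it safe to call under a spinlock; on
contention the callback simply backs out and the page is revisited on a
later scan. Distilled, the pattern restored by this revert is:

	/* opportunistic lock acquisition under lru->lock: back out
	 * instead of sleeping if any lock is contended
	 */
	if (!mmap_read_trylock(mm))
		goto err_mmap_read_lock_failed;
	if (!mutex_trylock(&alloc->mutex))
		goto err_get_alloc_mutex_failed;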