binder: rename alloc->buffer to vm_start

The alloc->buffer field in struct binder_alloc stores the starting
address of the mapped vma; rename this field to alloc->vm_start to
better reflect its purpose. It also avoids confusion with the binder
buffer concept, e.g. transaction->buffer.

No functional changes in this patch.

Reviewed-by: Suren Baghdasaryan <surenb@google.com>
Signed-off-by: Carlos Llamas <cmllamas@google.com>
Link: https://lore.kernel.org/r/20241210143114.661252-7-cmllamas@google.com
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
This commit is contained in:
Carlos Llamas
2024-12-10 14:31:02 +00:00
committed by Greg Kroah-Hartman
parent 072010abc3
commit 0a7bf6866d
5 changed files with 19 additions and 19 deletions

View File

@@ -61,7 +61,7 @@ static size_t binder_alloc_buffer_size(struct binder_alloc *alloc,
struct binder_buffer *buffer)
{
if (list_is_last(&buffer->entry, &alloc->buffers))
return alloc->buffer + alloc->buffer_size - buffer->user_data;
return alloc->vm_start + alloc->buffer_size - buffer->user_data;
return binder_buffer_next(buffer)->user_data - buffer->user_data;
}
@@ -203,7 +203,7 @@ static void binder_lru_freelist_add(struct binder_alloc *alloc,
size_t index;
int ret;
index = (page_addr - alloc->buffer) / PAGE_SIZE;
index = (page_addr - alloc->vm_start) / PAGE_SIZE;
page = binder_get_installed_page(alloc, index);
if (!page)
continue;
@@ -305,7 +305,7 @@ static int binder_install_single_page(struct binder_alloc *alloc,
FOLL_NOFAULT, &page, NULL);
if (npages <= 0) {
pr_err("%d: failed to find page at offset %lx\n",
alloc->pid, addr - alloc->buffer);
alloc->pid, addr - alloc->vm_start);
ret = -ESRCH;
break;
}
@@ -317,7 +317,7 @@ static int binder_install_single_page(struct binder_alloc *alloc,
default:
binder_free_page(page);
pr_err("%d: %s failed to insert page at offset %lx with %d\n",
alloc->pid, __func__, addr - alloc->buffer, ret);
alloc->pid, __func__, addr - alloc->vm_start, ret);
ret = -ENOMEM;
break;
}
@@ -342,7 +342,7 @@ static int binder_install_buffer_pages(struct binder_alloc *alloc,
unsigned long index;
int ret;
index = (page_addr - alloc->buffer) / PAGE_SIZE;
index = (page_addr - alloc->vm_start) / PAGE_SIZE;
if (binder_get_installed_page(alloc, index))
continue;
@@ -371,7 +371,7 @@ static void binder_lru_freelist_del(struct binder_alloc *alloc,
unsigned long index;
bool on_lru;
index = (page_addr - alloc->buffer) / PAGE_SIZE;
index = (page_addr - alloc->vm_start) / PAGE_SIZE;
page = binder_get_installed_page(alloc, index);
if (page) {
@@ -723,8 +723,8 @@ static void binder_free_buf_locked(struct binder_alloc *alloc,
BUG_ON(buffer->free);
BUG_ON(size > buffer_size);
BUG_ON(buffer->transaction != NULL);
BUG_ON(buffer->user_data < alloc->buffer);
BUG_ON(buffer->user_data > alloc->buffer + alloc->buffer_size);
BUG_ON(buffer->user_data < alloc->vm_start);
BUG_ON(buffer->user_data > alloc->vm_start + alloc->buffer_size);
if (buffer->async_transaction) {
alloc->free_async_space += buffer_size;
@@ -783,7 +783,7 @@ static struct page *binder_alloc_get_page(struct binder_alloc *alloc,
pgoff_t *pgoffp)
{
binder_size_t buffer_space_offset = buffer_offset +
(buffer->user_data - alloc->buffer);
(buffer->user_data - alloc->vm_start);
pgoff_t pgoff = buffer_space_offset & ~PAGE_MASK;
size_t index = buffer_space_offset >> PAGE_SHIFT;
@@ -882,7 +882,7 @@ int binder_alloc_mmap_handler(struct binder_alloc *alloc,
SZ_4M);
mutex_unlock(&binder_alloc_mmap_lock);
alloc->buffer = vma->vm_start;
alloc->vm_start = vma->vm_start;
alloc->pages = kvcalloc(alloc->buffer_size / PAGE_SIZE,
sizeof(alloc->pages[0]),
@@ -900,7 +900,7 @@ int binder_alloc_mmap_handler(struct binder_alloc *alloc,
goto err_alloc_buf_struct_failed;
}
buffer->user_data = alloc->buffer;
buffer->user_data = alloc->vm_start;
list_add(&buffer->entry, &alloc->buffers);
buffer->free = 1;
binder_insert_free_buffer(alloc, buffer);
@@ -915,7 +915,7 @@ err_alloc_buf_struct_failed:
kvfree(alloc->pages);
alloc->pages = NULL;
err_alloc_pages_failed:
alloc->buffer = 0;
alloc->vm_start = 0;
mutex_lock(&binder_alloc_mmap_lock);
alloc->buffer_size = 0;
err_already_mapped:
@@ -1016,7 +1016,7 @@ void binder_alloc_print_allocated(struct seq_file *m,
buffer = rb_entry(n, struct binder_buffer, rb_node);
seq_printf(m, " buffer %d: %lx size %zd:%zd:%zd %s\n",
buffer->debug_id,
buffer->user_data - alloc->buffer,
buffer->user_data - alloc->vm_start,
buffer->data_size, buffer->offsets_size,
buffer->extra_buffers_size,
buffer->transaction ? "active" : "delivered");
@@ -1121,7 +1121,7 @@ enum lru_status binder_alloc_free_page(struct list_head *item,
goto err_get_alloc_mutex_failed;
index = mdata->page_index;
page_addr = alloc->buffer + index * PAGE_SIZE;
page_addr = alloc->vm_start + index * PAGE_SIZE;
vma = vma_lookup(mm, page_addr);
/*