struct binder_fd_array_object *fda;
struct binder_buffer_object *parent;
struct binder_object ptr_object;
- uintptr_t parent_buffer;
u32 *fd_array;
size_t fd_index;
binder_size_t fd_buf_size;
debug_id);
continue;
}
- /*
- * Since the parent was already fixed up, convert it
- * back to kernel address space to access it
- */
- parent_buffer = parent->buffer -
- binder_alloc_get_user_buffer_offset(
- &proc->alloc);
-
fd_buf_size = sizeof(u32) * fda->num_fds;
if (fda->num_fds >= SIZE_MAX / sizeof(u32)) {
pr_err("transaction release %d invalid number of fds (%lld)\n",
debug_id, (u64)fda->num_fds);
continue;
}
- fd_array = (u32 *)(parent_buffer + (uintptr_t)fda->parent_offset);
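+ /*
+  * parent->buffer now holds the user-space address of the
+  * parent buffer; it is only used for offset arithmetic and
+  * is never dereferenced here.
+  */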
+ fd_array = (u32 *)(uintptr_t)
+ (parent->buffer + fda->parent_offset);
for (fd_index = 0; fd_index < fda->num_fds;
fd_index++) {
u32 fd;
struct binder_transaction *in_reply_to)
{
binder_size_t fdi, fd_buf_size;
- uintptr_t parent_buffer;
u32 *fd_array;
struct binder_proc *proc = thread->proc;
struct binder_proc *target_proc = t->to_proc;
proc->pid, thread->pid, (u64)fda->num_fds);
return -EINVAL;
}
- /*
- * Since the parent was already fixed up, convert it
- * back to the kernel address space to access it
- */
- parent_buffer = parent->buffer -
- binder_alloc_get_user_buffer_offset(&target_proc->alloc);
- fd_array = (u32 *)(parent_buffer + (uintptr_t)fda->parent_offset);
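+ /*
+  * parent->buffer is a user-space address used only to compute
+  * buffer offsets and check alignment, not to access memory.
+  */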
+ fd_array = (u32 *)(uintptr_t)(parent->buffer + fda->parent_offset);
if (!IS_ALIGNED((unsigned long)fd_array, sizeof(u32))) {
binder_user_error("%d:%d parent offset not aligned correctly.\n",
proc->pid, thread->pid);
binder_size_t last_fixup_min_off)
{
struct binder_buffer_object *parent;
- u8 *parent_buffer;
struct binder_buffer *b = t->buffer;
struct binder_proc *proc = thread->proc;
struct binder_proc *target_proc = t->to_proc;
proc->pid, thread->pid);
return -EINVAL;
}
- parent_buffer = (u8 *)((uintptr_t)parent->buffer -
- binder_alloc_get_user_buffer_offset(
- &target_proc->alloc));
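+ /*
+  * parent->buffer and b->data are both user-space addresses now,
+  * so the fixup offset can be computed from them directly.
+  */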
buffer_offset = bp->parent_offset +
- (uintptr_t)parent_buffer - (uintptr_t)b->data;
+ (uintptr_t)parent->buffer - (uintptr_t)b->data;
binder_alloc_copy_to_buffer(&target_proc->alloc, b, buffer_offset,
&bp->buffer, sizeof(bp->buffer));
ALIGN(tr->offsets_size, sizeof(void *)) +
ALIGN(extra_buffers_size, sizeof(void *)) -
ALIGN(secctx_sz, sizeof(u64));
- char *kptr = t->buffer->data + buf_offset;
- t->security_ctx = (uintptr_t)kptr +
- binder_alloc_get_user_buffer_offset(&target_proc->alloc);
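+ /*
+  * t->buffer->data is the user-space base of the buffer, so the
+  * security context address needs no conversion before being
+  * handed to userspace.
+  */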
+ t->security_ctx = (uintptr_t)t->buffer->data + buf_offset;
binder_alloc_copy_to_buffer(&target_proc->alloc,
t->buffer, buf_offset,
secctx, secctx_sz);
goto err_copy_data_failed;
}
/* Fixup buffer pointer to target proc address space */
- bp->buffer = (uintptr_t)sg_bufp +
- binder_alloc_get_user_buffer_offset(
- &target_proc->alloc);
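+ /* sg_bufp already holds the address seen by the target process */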
+ bp->buffer = (uintptr_t)sg_bufp;
sg_bufp += ALIGN(bp->length, sizeof(u64));
ret = binder_fixup_parent(t, thread, bp,
}
trd->data_size = t->buffer->data_size;
trd->offsets_size = t->buffer->offsets_size;
- trd->data.ptr.buffer = (binder_uintptr_t)
- ((uintptr_t)t->buffer->data +
- binder_alloc_get_user_buffer_offset(&proc->alloc));
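+ /*
+  * The buffer address stored in t->buffer->data is the user-space
+  * address, so it can be returned to the target unchanged.
+  */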
+ trd->data.ptr.buffer = (uintptr_t)t->buffer->data;
trd->data.ptr.offsets = trd->data.ptr.buffer +
ALIGN(t->buffer->data_size,
sizeof(void *));
{
struct rb_node *n = alloc->allocated_buffers.rb_node;
struct binder_buffer *buffer;
- void *kern_ptr;
+ void *uptr;
- kern_ptr = (void *)(user_ptr - alloc->user_buffer_offset);
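+ /*
+  * buffer->data entries are user-space addresses, so user_ptr can
+  * be matched against the rb-tree without conversion.
+  */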
+ uptr = (void *)user_ptr;
while (n) {
buffer = rb_entry(n, struct binder_buffer, rb_node);
BUG_ON(buffer->free);
- if (kern_ptr < buffer->data)
+ if (uptr < buffer->data)
n = n->rb_left;
- else if (kern_ptr > buffer->data)
+ else if (uptr > buffer->data)
n = n->rb_right;
else {
/*
page->alloc = alloc;
INIT_LIST_HEAD(&page->lru);
- user_page_addr =
- (uintptr_t)page_addr + alloc->user_buffer_offset;
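+ /* page_addr is based on alloc->buffer, which is now vma->vm_start */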
+ user_page_addr = (uintptr_t)page_addr;
ret = vm_insert_page(vma, user_page_addr, page[0].page_ptr);
if (ret) {
pr_err("%d: binder_alloc_buf failed to map page at %lx in userspace\n",
}
alloc->buffer = (void *)vma->vm_start;
- alloc->user_buffer_offset = 0;
mutex_unlock(&binder_alloc_mmap_lock);
alloc->pages = kcalloc((vma->vm_end - vma->vm_start) / PAGE_SIZE,
if (vma) {
trace_binder_unmap_user_start(alloc, index);
- zap_page_range(vma,
- page_addr + alloc->user_buffer_offset,
- PAGE_SIZE);
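+ /* page_addr is already the user-space address of the page */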
+ zap_page_range(vma, page_addr, PAGE_SIZE);
trace_binder_unmap_user_end(alloc, index);
* (invariant after init)
* @vma_vm_mm: copy of vma->vm_mm (invariant after mmap)
* @buffer: base of per-proc address space mapped via mmap
- * @user_buffer_offset: offset between user and kernel VAs for buffer
* @buffers: list of all buffers for this proc
* @free_buffers: rb tree of buffers available for allocation
* sorted by size
struct vm_area_struct *vma;
struct mm_struct *vma_vm_mm;
void *buffer;
- ptrdiff_t user_buffer_offset;
struct list_head buffers;
struct rb_root free_buffers;
struct rb_root allocated_buffers;
return free_async_space;
}
-/**
- * binder_alloc_get_user_buffer_offset() - get offset between kernel/user addrs
- * @alloc: binder_alloc for this proc
- *
- * Return: the offset between kernel and user-space addresses to use for
- * virtual address conversion
- */
-static inline ptrdiff_t
-binder_alloc_get_user_buffer_offset(struct binder_alloc *alloc)
-{
- /*
- * user_buffer_offset is constant if vma is set and
- * undefined if vma is not set. It is possible to
- * get here with !alloc->vma if the target process
- * is dying while a transaction is being initiated.
- * Returning the old value is ok in this case and
- * the transaction will fail.
- */
- return alloc->user_buffer_offset;
-}
-
unsigned long
binder_alloc_copy_user_to_buffer(struct binder_alloc *alloc,
struct binder_buffer *buffer,