DPRINT(("smpl_buf @%p\n", smpl_buf));
/* allocate vma */
- vma = vm_area_alloc();
+ vma = vm_area_alloc(mm);
if (!vma) {
DPRINT(("Cannot allocate vma\n"));
goto error_kmem;
}
- INIT_LIST_HEAD(&vma->anon_vma_chain);
/*
* partially initialize the vma for the sampling buffer
*/
- vma->vm_mm = mm;
vma->vm_file = get_file(filp);
vma->vm_flags = VM_READ|VM_MAYREAD|VM_DONTEXPAND|VM_DONTDUMP;
vma->vm_page_prot = PAGE_READONLY; /* XXX may need to change */
* the problem. When the process attempts to write to the register backing store
* for the first time, it will get a SEGFAULT in this case.
*/
- vma = vm_area_alloc();
+ vma = vm_area_alloc(current->mm);
if (vma) {
- INIT_LIST_HEAD(&vma->anon_vma_chain);
- vma->vm_mm = current->mm;
vma->vm_start = current->thread.rbs_bot & PAGE_MASK;
vma->vm_end = vma->vm_start + PAGE_SIZE;
vma->vm_flags = VM_DATA_DEFAULT_FLAGS|VM_GROWSUP|VM_ACCOUNT;
/* map NaT-page at address zero to speed up speculative dereferencing of NULL: */
if (!(current->personality & MMAP_PAGE_ZERO)) {
- vma = vm_area_alloc();
+ vma = vm_area_alloc(current->mm);
if (vma) {
- INIT_LIST_HEAD(&vma->anon_vma_chain);
- vma->vm_mm = current->mm;
vma->vm_end = PAGE_SIZE;
vma->vm_page_prot = __pgprot(pgprot_val(PAGE_READONLY) | _PAGE_MA_NAT);
vma->vm_flags = VM_READ | VM_MAYREAD | VM_IO |
		VM_DONTEXPAND | VM_DONTDUMP;
struct vm_area_struct *vma = NULL;
struct mm_struct *mm = bprm->mm;
- bprm->vma = vma = vm_area_alloc();
+ bprm->vma = vma = vm_area_alloc(mm);
if (!vma)
return -ENOMEM;
err = -EINTR;
goto err_free;
}
- vma->vm_mm = mm;
/*
* Place the stack at the largest stack address the architecture
vma->vm_start = vma->vm_end - PAGE_SIZE;
vma->vm_flags = VM_SOFTDIRTY | VM_STACK_FLAGS | VM_STACK_INCOMPLETE_SETUP;
vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
- INIT_LIST_HEAD(&vma->anon_vma_chain);
err = insert_vm_struct(mm, vma);
if (err)
	goto err;
* mmap() functions).
*/
-struct vm_area_struct *vm_area_alloc(void);
+struct vm_area_struct *vm_area_alloc(struct mm_struct *);
struct vm_area_struct *vm_area_dup(struct vm_area_struct *);
void vm_area_free(struct vm_area_struct *);
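With the new signature, callers hand the owning mm to the allocator and no longer set vma->vm_mm or initialize the anon_vma_chain list head by hand. A minimal sketch of what a converted call site looks like (map_one_page() is a hypothetical helper for illustration, not part of this patch; it mirrors the insert_vm_struct() pattern in the fs/exec.c hunk above):

	/* Hypothetical helper, for illustration only. */
	static int map_one_page(struct mm_struct *mm, unsigned long addr)
	{
		struct vm_area_struct *vma = vm_area_alloc(mm);
		int err;

		if (!vma)
			return -ENOMEM;

		/* vm_mm and anon_vma_chain are already set up by vm_area_alloc() */
		vma->vm_start = addr;
		vma->vm_end = addr + PAGE_SIZE;
		vma->vm_flags = VM_READ | VM_MAYREAD;
		vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);

		err = insert_vm_struct(mm, vma);
		if (err)
			vm_area_free(vma);
		return err;
	}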
/* SLAB cache for mm_struct structures (tsk->mm) */
static struct kmem_cache *mm_cachep;
-struct vm_area_struct *vm_area_alloc(void)
+struct vm_area_struct *vm_area_alloc(struct mm_struct *mm)
{
- return kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
+ struct vm_area_struct *vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
+
+ if (vma) {
+ vma->vm_mm = mm;
+ INIT_LIST_HEAD(&vma->anon_vma_chain);
+ }
+ return vma;
}
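Note which fields the new allocator has to touch: kmem_cache_zalloc() already zero-fills the structure, so the only members that cannot rely on zeroing are vm_mm (which needs the caller's mm pointer) and anon_vma_chain (an empty list_head must point at itself, so a zeroed one is not a valid empty list). Centralizing those two assignments is what lets every converted call site drop its vm_mm/INIT_LIST_HEAD boilerplate.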
struct vm_area_struct *vm_area_dup(struct vm_area_struct *orig)
* specific mapper. the address has already been validated, but
* not unmapped, but the maps are removed from the list.
*/
- vma = vm_area_alloc();
+ vma = vm_area_alloc(mm);
if (!vma) {
error = -ENOMEM;
goto unacct_error;
}
- vma->vm_mm = mm;
vma->vm_start = addr;
vma->vm_end = addr + len;
vma->vm_flags = vm_flags;
vma->vm_page_prot = vm_get_page_prot(vm_flags);
vma->vm_pgoff = pgoff;
- INIT_LIST_HEAD(&vma->anon_vma_chain);
if (file) {
if (vm_flags & VM_DENYWRITE) {
/*
* create a vma struct for an anonymous mapping
*/
- vma = vm_area_alloc();
+ vma = vm_area_alloc(mm);
if (!vma) {
vm_unacct_memory(len >> PAGE_SHIFT);
return -ENOMEM;
}
- INIT_LIST_HEAD(&vma->anon_vma_chain);
- vma->vm_mm = mm;
vma->vm_start = addr;
vma->vm_end = addr + len;
vma->vm_pgoff = pgoff;
int ret;
struct vm_area_struct *vma;
- vma = vm_area_alloc();
+ vma = vm_area_alloc(mm);
if (unlikely(vma == NULL))
return ERR_PTR(-ENOMEM);
- INIT_LIST_HEAD(&vma->anon_vma_chain);
- vma->vm_mm = mm;
vma->vm_start = addr;
vma->vm_end = addr + len;
if (!region)
goto error_getting_region;
- vma = vm_area_alloc();
+ vma = vm_area_alloc(current->mm);
if (!vma)
goto error_getting_vma;
region->vm_flags = vm_flags;
region->vm_pgoff = pgoff;
- INIT_LIST_HEAD(&vma->anon_vma_chain);
vma->vm_flags = vm_flags;
vma->vm_pgoff = pgoff;