(1) s/PAGE_SIZE/MMUPAGE_SIZE/ in oprofile vma futzing
(2) trim excess memory allocations in mm/vmalloc.c

Note this fix is not the only one needed to run oprofile on virgin 2.5.64

 drivers/oprofile/buffer_sync.c |    2 +-
 mm/vmalloc.c                   |   27 ++++++++++-----------------
 2 files changed, 11 insertions(+), 18 deletions(-)

diff -urpN virgin-2.5.64/drivers/oprofile/buffer_sync.c linux-2.5.64/drivers/oprofile/buffer_sync.c
--- virgin-2.5.64/drivers/oprofile/buffer_sync.c	Tue Mar  4 19:29:04 2003
+++ linux-2.5.64/drivers/oprofile/buffer_sync.c	Wed Mar 12 07:11:12 2003
@@ -181,7 +185,7 @@ static unsigned long lookup_dcookie(stru
 			cookie = fast_get_dcookie(vma->vm_file->f_dentry,
 				vma->vm_file->f_vfsmnt);
-			*offset = (vma->vm_pgoff << PAGE_SHIFT) + addr - vma->vm_start;
+			*offset = MMUPAGE_SIZE*vma->vm_pgoff + addr - vma->vm_start;
 			break;
 		}
 out:
diff -urpN virgin-2.5.64/mm/vmalloc.c linux-2.5.64/mm/vmalloc.c
--- virgin-2.5.64/mm/vmalloc.c	Wed Mar 12 16:32:30 2003
+++ linux-2.5.64/mm/vmalloc.c	Wed Mar 12 16:08:37 2003
@@ -102,7 +102,8 @@ void unmap_vm_area(struct vm_struct *are
 int map_vm_area(struct vm_struct *area, pgprot_t prot, struct page ***pages)
 {
 	unsigned long address = VMALLOC_VMADDR(area->addr);
-	unsigned long end = address + (area->size-MMUPAGE_SIZE);
+	/* don't instantiate PTE's for the guard page */
+	unsigned long end = address + area->size - MMUPAGE_SIZE;
 	unsigned long voffset = 0;
 	pgd_t *pgd;
 	int err = 0;
@@ -313,10 +314,10 @@ void *vmap(struct page **pages, unsigned
 {
 	struct vm_struct *area;
 
-	if (count > num_physpages)
+	if (PAGE_MMUCOUNT*count > num_physpages)
 		return NULL;
-	area = get_vm_area((count << PAGE_SHIFT), VM_MAP);
+	area = get_vm_area(PAGE_SIZE*count, VM_MAP);
 	if (!area)
 		return NULL;
 	if (map_vm_area(area, PAGE_KERNEL, &pages)) {
@@ -352,8 +353,8 @@ void *__vmalloc(unsigned long size, int
 	if (!area)
 		return NULL;
 
-	nr_pages = size >> MMUPAGE_SHIFT;
-	array_size = (nr_pages * sizeof(struct page *));
+	nr_pages = PAGE_ALIGN(size)/PAGE_SIZE;
+	array_size = nr_pages * sizeof(struct page *);
 	area->nr_pages = nr_pages;
 	area->pages = pages = kmalloc(array_size, (gfp_mask & ~__GFP_HIGHMEM));
@@ -411,10 +412,6 @@ void *vmalloc_32(unsigned long size)
 	return __vmalloc(size, GFP_KERNEL, PAGE_KERNEL);
 }
 
-/*
- * XXX: broken, fix eventually
- *	-- wli
- */
 long vread(char *buf, char *addr, unsigned long count)
 {
 	struct vm_struct *tmp;
@@ -428,7 +425,7 @@ long vread(char *buf, char *addr, unsign
 	read_lock(&vmlist_lock);
 	for (tmp = vmlist; tmp; tmp = tmp->next) {
 		vaddr = (char *) tmp->addr;
-		if (addr >= vaddr + tmp->size - PAGE_SIZE)
+		if (addr >= vaddr + tmp->size - MMUPAGE_SIZE)
 			continue;
 		while (addr < vaddr) {
 			if (count == 0)
@@ -438,7 +435,7 @@ long vread(char *buf, char *addr, unsign
 			addr++;
 			count--;
 		}
-		n = vaddr + tmp->size - PAGE_SIZE - addr;
+		n = vaddr + tmp->size - MMUPAGE_SIZE - addr;
 		do {
 			if (count == 0)
 				goto finished;
@@ -453,10 +450,6 @@ finished:
 	return buf - buf_start;
 }
 
-/*
- * XXX: broken, fix eventually
- *	-- wli
- */
 long vwrite(char *buf, char *addr, unsigned long count)
 {
 	struct vm_struct *tmp;
@@ -470,7 +463,7 @@ long vwrite(char *buf, char *addr, unsig
 	read_lock(&vmlist_lock);
 	for (tmp = vmlist; tmp; tmp = tmp->next) {
 		vaddr = (char *) tmp->addr;
-		if (addr >= vaddr + tmp->size - PAGE_SIZE)
+		if (addr >= vaddr + tmp->size - MMUPAGE_SIZE)
 			continue;
 		while (addr < vaddr) {
 			if (count == 0)
@@ -479,7 +472,7 @@ long vwrite(char *buf, char *addr, unsig
 			addr++;
 			count--;
 		}
-		n = vaddr + tmp->size - PAGE_SIZE - addr;
+		n = vaddr + tmp->size - MMUPAGE_SIZE - addr;
 		do {
 			if (count == 0)
 				goto finished;