diff -prauN pgcl-2.6.0-test5-bk3-6/include/linux/folio.h pgcl-2.6.0-test5-bk3-7/include/linux/folio.h
--- pgcl-2.6.0-test5-bk3-6/include/linux/folio.h	2003-11-18 05:46:52.000000000 -0800
+++ pgcl-2.6.0-test5-bk3-7/include/linux/folio.h	2003-11-20 10:47:07.000000000 -0800
@@ -526,11 +526,41 @@ static /* inline */ void flush_folio(pte
 	flush_tlb_range(vma, start, end);
 }
 
-static /* inline */ void adjust_page_count(struct page *page, int count)
+#if 1
+static inline void __adjust_page_count(const char *file,
+					int line,
+					const char *func,
+					struct page *page,
+					int count)
+{
+	pr_debug("%d: adjust_page_count(0x%lx, %d) in %s, %s:%d with "
+			"count %d (expected %d)\n",
+			current->pid,
+			page_to_pfn(page),
+			count,
+			func,
+			file,
+			line,
+			page_count(page),
+			page_count_expected(page));
+
+	BUG_ON(page_count(page) + count <= 0);
+	atomic_add(count, &page->count);
+}
+#else
+static inline void __adjust_page_count(const char *file,
+					int line,
+					const char *func,
+					struct page *page,
+					int count)
 {
 	BUG_ON(page_count(page) + count <= 0);
 	atomic_add(count, &page->count);
 }
+#endif
+
+#define adjust_page_count(page, count) \
+	__adjust_page_count(__FILE__, __LINE__, __FUNCTION__, page, count)
 
 #else /* PAGE_MMUSHIFT 0 */
 
diff -prauN pgcl-2.6.0-test5-bk3-6/include/linux/mm.h pgcl-2.6.0-test5-bk3-7/include/linux/mm.h
--- pgcl-2.6.0-test5-bk3-6/include/linux/mm.h	2003-11-18 05:46:52.000000000 -0800
+++ pgcl-2.6.0-test5-bk3-7/include/linux/mm.h	2003-11-20 10:46:59.000000000 -0800
@@ -200,12 +200,18 @@ struct page {
 #endif /* CONFIG_HIGMEM || WANT_PAGE_VIRTUAL */
 };
 
+#ifndef CONFIG_DISCONTIGMEM
+/* The array of struct pages - for discontigmem use pgdat->lmem_map */
+extern struct page *mem_map;
+#endif
+
 /*
  * FIXME: take this include out, include page-flags.h in
  * files which need it (119 of them)
  */
 #include <linux/page-flags.h>
 
+int page_count_expected(struct page *);
 /*
  * Methods to modify the page usage count.
  *
@@ -218,11 +224,32 @@ struct page {
  * Also, many kernel routines increase the page count before a critical
  * routine so they can be sure the page doesn't go away from under them.
  */
-#define put_page_testzero(p)				\
+#define __put_page_testzero(p)				\
 	({						\
 		BUG_ON(page_count(p) <= 0);		\
 		atomic_dec_and_test(&(p)->count);	\
 	})
+#if 0
+#define _put_page_testzero(file, line, func, p)	__put_page_testzero(p)
+#else
+#define _put_page_testzero(file, line, func, p)				\
+({									\
+	struct page *__page__ = p;					\
+	pr_debug("%d: put_page_testzero(0x%lx) in %s, %s:%d with "	\
+			"count %d (expected %d)\n",			\
+			current->pid,					\
+			page_to_pfn(__page__),				\
+			func,						\
+			file,						\
+			line,						\
+			page_count(__page__),				\
+			page_count_expected(__page__));			\
+	__put_page_testzero(__page__);					\
+})
+#endif
+
+#define put_page_testzero(p)						\
+	_put_page_testzero(__FILE__, __LINE__, __FUNCTION__, p)
 
 #define page_count(p)		atomic_read(&(p)->count)
 #define set_page_count(p,v)	atomic_set(&(p)->count, v)
@@ -232,14 +259,14 @@ extern void FASTCALL(__page_cache_releas
 
 #ifdef CONFIG_HUGETLB_PAGE
 
-static inline void get_page(struct page *page)
+static inline void __get_page(struct page *page)
 {
 	if (PageCompound(page))
 		page = (struct page *)page->lru.next;
 	atomic_inc(&page->count);
 }
 
-static inline void put_page(struct page *page)
+static inline void ___put_page(struct page *page)
 {
 	if (PageCompound(page)) {
 		page = (struct page *)page->lru.next;
@@ -258,20 +285,69 @@ static inline void put_page(struct page
 
 #else	/* CONFIG_HUGETLB_PAGE */
 
-static inline void get_page(struct page *page)
+static inline void ___put_page(struct page *page)
 {
-	atomic_inc(&page->count);
+	if (!PageReserved(page) && __put_page_testzero(page))
+		__page_cache_release(page);
 }
 
-static inline void put_page(struct page *page)
+static inline void __get_page(struct page *page)
 {
-	if (!PageReserved(page) && put_page_testzero(page))
-		__page_cache_release(page);
+	atomic_inc(&page->count);
 }
 
 #endif /* CONFIG_HUGETLB_PAGE */
 
 /*
+ * Wrapper layer for deciding (at compile-time)
+ * whether to do printk's or not.
+ */
+#if 0
+
+#define _get_page(file, line, func, p)	__get_page(p)
+#define _put_page(file, line, func, p)	___put_page(p)
+
+#else
+
+#define _put_page(file, line, func, p)					\
+do {									\
+	struct page *__page__ = p;					\
+	pr_debug("%d: put_page(0x%lx) in %s, %s:%d with "		\
+			"count %d (expected %d)\n",			\
+			current->pid,					\
+			page_to_pfn(__page__),				\
+			func,						\
+			file,						\
+			line,						\
+			page_count(__page__),				\
+			page_count_expected(__page__));			\
+	___put_page(__page__);						\
+} while (0)
+
+#define _get_page(file, line, func, p)					\
+do {									\
+	struct page *__page__ = p;					\
+	pr_debug("%d: get_page(0x%lx) in %s, %s:%d with "		\
+			"count %d (expected %d)\n",			\
+			current->pid,					\
+			page_to_pfn(__page__),				\
+			func,						\
+			file,						\
+			line,						\
+			page_count(__page__),				\
+			page_count_expected(__page__));			\
+	__get_page(__page__);						\
+} while (0)
+
+#endif
+
+/*
+ * Wrapper layer for grabbing __FILE__, __LINE__, etc.
+ */
+#define get_page(p)	_get_page(__FILE__, __LINE__, __FUNCTION__, p)
+#define put_page(p)	_put_page(__FILE__, __LINE__, __FUNCTION__, p)
+
+/*
  * Multiple processes may "see" the same page. E.g. for untouched
  * mappings of /dev/null, all processes see the same page full of
  * zeroes, and text pages of executables and shared libraries have
@@ -341,11 +417,6 @@ static inline void set_page_zone(struct
 	page->flags |= zone_num << ZONE_SHIFT;
 }
 
-#ifndef CONFIG_DISCONTIGMEM
-/* The array of struct pages - for discontigmem use pgdat->lmem_map */
-extern struct page *mem_map;
-#endif
-
 static inline void *lowmem_page_address(struct page *page)
 {
 	return __va(page_to_pfn(page) << MMUPAGE_SHIFT);
diff -prauN pgcl-2.6.0-test5-bk3-6/include/linux/pagemap.h pgcl-2.6.0-test5-bk3-7/include/linux/pagemap.h
--- pgcl-2.6.0-test5-bk3-6/include/linux/pagemap.h	2003-09-14 23:49:20.000000000 -0700
+++ pgcl-2.6.0-test5-bk3-7/include/linux/pagemap.h	2003-11-20 10:46:44.000000000 -0800
@@ -49,8 +49,46 @@ static inline void mapping_set_gfp_mask(
 #define PAGE_CACHE_MMUSHIFT	(PAGE_CACHE_SHIFT - MMUPAGE_SHIFT)
 #define PAGE_CACHE_MMUCOUNT	(PAGE_CACHE_SIZE/MMUPAGE_SIZE)
 
-#define page_cache_get(page)		get_page(page)
-#define page_cache_release(page)	put_page(page)
+#if 1
+#define __page_cache_get__(file, line, func, pg)			\
+do {									\
+	struct page *__page__ = pg;					\
+	pr_debug("%d: page_cache_get(0x%lx) in %s, %s:%d with "	\
+			"count %d (expected %d)\n",			\
+			current->pid,					\
+			page_to_pfn(__page__),				\
+			func,						\
+			file,						\
+			line,						\
+			page_count(__page__),				\
+			page_count_expected(__page__));			\
+	__get_page(__page__);						\
+} while (0)
+
+#define __page_cache_release__(file, line, func, pg)			\
+do {									\
+	struct page *__page__ = pg;					\
+	pr_debug("%d: page_cache_release(0x%lx) in %s, %s:%d with "	\
+			"count %d (expected %d)\n",			\
+			current->pid,					\
+			page_to_pfn(__page__),				\
+			func,						\
+			file,						\
+			line,						\
+			page_count(__page__),				\
+			page_count_expected(__page__));			\
+	___put_page(__page__);						\
+} while (0)
+#else
+#define __page_cache_get__(file, line, func, page)	get_page(page)
+#define __page_cache_release__(file, line, func, page)	put_page(page)
+#endif
+
+#define page_cache_get(page)						\
+	__page_cache_get__(__FILE__, __LINE__, __FUNCTION__, page)
+#define page_cache_release(page)					\
+	__page_cache_release__(__FILE__, __LINE__, __FUNCTION__, page)
+
 void release_pages(struct page **pages, int nr, int cold);
 
 static inline struct page *page_cache_alloc(struct address_space *x)
diff -prauN pgcl-2.6.0-test5-bk3-6/include/linux/swap.h pgcl-2.6.0-test5-bk3-7/include/linux/swap.h
--- pgcl-2.6.0-test5-bk3-6/include/linux/swap.h	2003-11-18 05:30:34.000000000 -0800
+++ pgcl-2.6.0-test5-bk3-7/include/linux/swap.h	2003-11-20 08:41:14.000000000 -0800
@@ -188,6 +188,7 @@ extern int vm_swappiness;
 #define RMAP_FASTCALL(x)	x
 #endif
 
+int page_count_expected(struct page *);
 int RMAP_FASTCALL(page_referenced(struct page *));
 struct pte_chain *RMAP_FASTCALL(page_add_rmap(struct page *, pte_t *,
 					struct pte_chain *));
diff -prauN pgcl-2.6.0-test5-bk3-6/mm/memory.c pgcl-2.6.0-test5-bk3-7/mm/memory.c
--- pgcl-2.6.0-test5-bk3-6/mm/memory.c	2003-11-18 05:46:52.000000000 -0800
+++ pgcl-2.6.0-test5-bk3-7/mm/memory.c	2003-11-20 09:57:42.000000000 -0800
@@ -1045,6 +1045,9 @@ static int do_wp_page(struct mm_struct *
 	old_page = pfn_to_page(pfn);
 	reprep = prepare_folio(folio, vma, address, ptep_to_paddr(page_table), 1);
 	new_page = private_folio_page(folio, PAGE_MMUSHIFT ? NULL : old_page);
+
+	if (!PageReserved(old_page))
+		page_cache_get(old_page);
 	if (new_page) {
 		pr_debug("%d: got private page\n", current->pid);
 		page_cache_get(new_page);
@@ -1052,13 +1055,12 @@ static int do_wp_page(struct mm_struct *
 	}
 
 	pte_unmap(page_table);
-
-	page_cache_get(old_page);
 	spin_unlock(&mm->page_table_lock);
 
 	pte_chain = pte_chain_alloc(GFP_KERNEL);
 	if (!pte_chain) {
-		page_cache_release(old_page);
+		if (!PageReserved(old_page))
+			page_cache_release(old_page);
 		goto oom;
 	}
 	new_page = alloc_page(GFP_HIGHUSER);
@@ -1066,7 +1068,8 @@ static int do_wp_page(struct mm_struct *
 			current->pid, page_to_pfn(new_page));
 
 	if (!new_page) {
-		page_cache_release(old_page);
+		if (!PageReserved(old_page))
+			page_cache_release(old_page);
 		goto oom;
 	}
 
@@ -1101,7 +1104,7 @@ got_page:
 		if (PageReserved(old_page))
 			mm->rss += rss;
 		else
-			adjust_page_count(old_page, 1 - rss);
+			adjust_page_count(old_page, -rss);
 	}
 	pte_unmap(page_table);
 	flush_folio(folio, vma, address);
@@ -1515,7 +1518,8 @@ retry:
 				current->pid, page_to_pfn(new_page));
 
 		if (!new_page) {
-			page_cache_release(page);
+			if (!PageReserved(page))
+				page_cache_release(page);
 			goto oom;
 		}
 		lru_cache_add_active(new_page);
@@ -1532,7 +1536,8 @@ retry:
 	    (unlikely(sequence != atomic_read(&mapping->truncate_count)))) {
 		sequence = atomic_read(&mapping->truncate_count);
 		spin_unlock(&mm->page_table_lock);
-		page_cache_release(new_page);
+		if (new_page)
+			page_cache_release(new_page);
 		goto retry;
 	}
 	page_table = pte_offset_map(pmd, address);
@@ -1551,7 +1556,8 @@ retry:
 		new_page = private_folio_page_xchg(folio, new_page);
 		restrict_folio(folio, vma, address, page_table);
 		copy_folio(folio, new_page, page, address);
-		page_cache_release(page);
+		if (!PageReserved(page))
+			page_cache_release(page);
 		page = new_page;
 	} else
 		restrict_folio(folio, vma, address, page_table);
diff -prauN pgcl-2.6.0-test5-bk3-6/mm/rmap.c pgcl-2.6.0-test5-bk3-7/mm/rmap.c
--- pgcl-2.6.0-test5-bk3-6/mm/rmap.c	2003-11-18 05:46:52.000000000 -0800
+++ pgcl-2.6.0-test5-bk3-7/mm/rmap.c	2003-11-20 08:43:09.000000000 -0800
@@ -127,6 +127,42 @@ pte_chain_encode(struct pte_chain *pte_c
  **/
 
 /**
+ * page_count_expected - find the expected refcount
+ * @page: the page to check
+ *
+ * This finds the expected reference count from page attributes.
+ */
+int page_count_expected(struct page *page)
+{
+	int count = 0;
+
+	pte_chain_lock(page);
+	if (PageDirect(page) && page->pte.direct)
+		++count;
+	else {
+		struct pte_chain *chain = page->pte.chain;
+
+		while (chain) {
+			int k;
+
+			for (k = NRPTE - 1; k >= 0; --k) {
+				if (!chain->ptes[k])
+					break;
+				else
+					++count;
+			}
+			chain = pte_chain_next(chain);
+		}
+	}
+	if (page->mapping)
+		++count;
+	if (PagePrivate(page))
+		++count;
+	pte_chain_unlock(page);
+	return count;
+}
+
+/**
  * page_referenced - test if the page was referenced
  * @page: the page to test
  *