Remove a rather large amount of debugging cruft. This is separated out due to
its size; shifting around this much code could potentially destabilize things,
so it is kept apart just to be on the safe side.

diff -prauN pgcl-2.5.70-bk14-2/Makefile pgcl-2.5.70-bk14-3/Makefile
--- pgcl-2.5.70-bk14-2/Makefile 2003-06-09 12:30:17.000000000 -0700
+++ pgcl-2.5.70-bk14-3/Makefile 2003-06-10 00:27:36.000000000 -0700
@@ -216,7 +216,7 @@ NOSTDINC_FLAGS = -nostdinc -iwithprefix
 CPPFLAGS := -D__KERNEL__ -Iinclude
 CFLAGS := $(CPPFLAGS) -Wall -Wstrict-prototypes -Wno-trigraphs -O2 \
-	  -fno-strict-aliasing -fno-common -g
+	  -fno-strict-aliasing -fno-common
 AFLAGS := -D__ASSEMBLY__ $(CPPFLAGS)

 export VERSION PATCHLEVEL SUBLEVEL EXTRAVERSION KERNELRELEASE ARCH \
diff -prauN pgcl-2.5.70-bk14-2/mm/memory.c pgcl-2.5.70-bk14-3/mm/memory.c
--- pgcl-2.5.70-bk14-2/mm/memory.c 2003-06-10 00:16:48.000000000 -0700
+++ pgcl-2.5.70-bk14-3/mm/memory.c 2003-06-10 00:39:15.000000000 -0700
@@ -65,9 +65,6 @@ unsigned long num_physpages;
 void * high_memory;
 struct page *highmem_start_page;

-void check_rt_hash(void);
-int check_rt_addr(void *);
-
 /*
  * We special-case the C-O-W ZERO_PAGE, because it's such
  * a common occurrence (no need to read the page to know
@@ -1020,8 +1017,6 @@ int remap_page_range(struct vm_area_stru
  */
 static inline void establish_pte(struct vm_area_struct * vma, unsigned long address, pte_t *page_table, pte_t entry)
 {
-	BUG_ON((unsigned long)page_table > (unsigned long)(-PAGE_SIZE));
-	BUG_ON((unsigned long)page_table < PAGE_OFFSET);
 	set_pte(page_table, entry);
 	flush_tlb_page(vma, address);
 	update_mmu_cache(vma, address, entry);
@@ -1030,11 +1025,11 @@ static inline void establish_pte(struct
 /*
  * We hold the mm semaphore for reading and vma->vm_mm->page_table_lock
  */
-static void break_cow(struct vm_area_struct * vma, struct page * new_page, unsigned long address, pte_t *page_table, unsigned long subpfn)
+static inline void break_cow(struct vm_area_struct *vma,
+			struct page *new_page, unsigned long address,
+			pte_t *page_table, unsigned long subpfn)
 {
 	pte_t pte = pfn_pte(page_to_pfn(new_page) + subpfn, vma->vm_page_prot);
-	BUG_ON((unsigned long)page_table > (unsigned long)(-PAGE_SIZE));
-	BUG_ON((unsigned long)page_table < PAGE_OFFSET);
 	invalidate_vcache(address, vma->vm_mm, new_page);
 	flush_cache_page(vma, address);
 	establish_pte(vma, address, page_table, pte_mkwrite(pte_mkdirty(pte)));
@@ -1045,7 +1040,6 @@ static pte_t *fill_anonymizable_ptevec(s
 				pte_t *ptes[], unsigned long addr, int map_pte, int cow)
 {
-	int k;
 	unsigned long lo_vaddr, hi_vaddr, dn_vaddr, up_vaddr,
 			dn_subpfn, up_subpfn, rss = 0, loop;
 	pte_t *up_pte, *dn_pte;
@@ -1077,7 +1071,6 @@ static pte_t *fill_anonymizable_ptevec(s
 		loop = 0;

 		if (dn_vaddr >= lo_vaddr && dn_subpfn <= up_subpfn) {
-			BUG_ON((unsigned long)dn_pte > (unsigned long)(-PAGE_SIZE));
 			if (pte_none(*dn_pte) || (cow && pte_present(*dn_pte) && !pte_write(*dn_pte))) {
 				ptes[dn_subpfn] = dn_pte;
 				dn_subpfn++;
@@ -1093,7 +1086,6 @@ static pte_t *fill_anonymizable_ptevec(s
 		}

 		if (up_vaddr < hi_vaddr && up_subpfn > dn_subpfn) {
-			BUG_ON((unsigned long)up_pte > (unsigned long)(-PAGE_SIZE));
 			if (pte_none(*up_pte) || (cow && pte_present(*up_pte) && !pte_write(*up_pte))) {
 				ptes[up_subpfn] = up_pte;
 				up_subpfn--;
@@ -1115,21 +1107,6 @@ static pte_t *fill_anonymizable_ptevec(s
 	} while ((up_vaddr < hi_vaddr || dn_vaddr >= lo_vaddr) && loop && up_subpfn >= dn_subpfn);

-	for (k = 0; k < PAGE_MMUCOUNT; ++k) {
-		if (!ptes[k])
-			continue;
-		BUG_ON((unsigned long)ptes[k] > (unsigned long)(-PAGE_SIZE));
-		BUG_ON((unsigned long)ptes[k] < PAGE_OFFSET);
-		BUG_ON(check_rt_addr(ptes[k]));
-		BUG_ON((unsigned long)kmap_atomic_to_page(ptes[k]) > (unsigned long)(-PAGE_SIZE));
-		BUG_ON((unsigned long)kmap_atomic_to_page(ptes[k]) < PAGE_OFFSET);
-#ifndef CONFIG_DISCONTIGMEM
-		BUG_ON((kmap_atomic_to_page(ptes[k]) - mem_map) >= max_mapnr);
-#endif
-		BUG_ON(check_rt_addr(kmap_atomic_to_page(ptes[k])));
-		BUG_ON(ptep_to_address(ptes[k]) >= MM_VM_SIZE(vma->vm_mm));
-	}
-
 	pr_debug("finishing PTE search loop\n");
 	pr_debug("starting PTE instantiation loop\n");
 	pr_debug("fill_anonymizable_ptevec() saw %lu ptes set\n", rss);
@@ -1195,17 +1172,6 @@ static void get_cow_pages(struct vm_area
 			continue;
 		}

-		BUG_ON((unsigned long)ptes[k] > (unsigned long)(-PAGE_SIZE));
-		BUG_ON((unsigned long)ptes[k] < PAGE_OFFSET);
-		BUG_ON(check_rt_addr(ptes[k]));
-		BUG_ON((unsigned long)kmap_atomic_to_page(ptes[k]) > (unsigned long)(-PAGE_SIZE));
-		BUG_ON((unsigned long)kmap_atomic_to_page(ptes[k]) < PAGE_OFFSET);
-#ifndef CONFIG_DISCONTIGMEM
-		BUG_ON((kmap_atomic_to_page(ptes[k]) - mem_map) >= max_mapnr);
-#endif
-		BUG_ON(check_rt_addr(kmap_atomic_to_page(ptes[k])));
-		BUG_ON(ptep_to_address(ptes[k]) >= MM_VM_SIZE(vma->vm_mm));
-
 		if (pte_present(*ptes[k])) {
 			if (pte_write(*ptes[k]))
 				pr_debug("writable pte in get_cow_pages()!\n");
@@ -1257,17 +1223,6 @@ static void save_ptes(pte_t *ptes[], pte
 		if (!ptes[k])
 			continue;

-		BUG_ON((unsigned long)ptes[k] > (unsigned long)(-PAGE_SIZE));
-		BUG_ON((unsigned long)ptes[k] < PAGE_OFFSET);
-		BUG_ON(check_rt_addr(ptes[k]));
-		BUG_ON((unsigned long)kmap_atomic_to_page(ptes[k]) > (unsigned long)(-PAGE_SIZE));
-		BUG_ON((unsigned long)kmap_atomic_to_page(ptes[k]) < PAGE_OFFSET);
-#ifndef CONFIG_DISCONTIGMEM
-		BUG_ON((kmap_atomic_to_page(ptes[k]) - mem_map) >= max_mapnr);
-#endif
-		BUG_ON(check_rt_addr(kmap_atomic_to_page(ptes[k])));
-		BUG_ON(ptep_to_address(ptes[k]) >= MM_VM_SIZE(vma->vm_mm));
-
 		ptevals[k] = *ptes[k];
 		ptes[k] = (pte_t *)ptep_to_address(ptes[k]);
 		++rss;
@@ -1295,21 +1250,8 @@ static pte_t *reconstitute_ptes(pmd_t *p
 			return NULL;

 		addr = (unsigned long)ptes[j];
-		BUG_ON(addr >= MM_VM_SIZE(vma->vm_mm));
-
 		ptes[j] = pte_offset_map(pmd, addr);
-		BUG_ON((unsigned long)ptes[j] > (unsigned long)(-PAGE_SIZE));
-		BUG_ON((unsigned long)ptes[j] < PAGE_OFFSET);
-		BUG_ON(check_rt_addr(ptes[j]));
-		BUG_ON((unsigned long)kmap_atomic_to_page(ptes[j]) > (unsigned long)(-PAGE_SIZE));
-		BUG_ON((unsigned long)kmap_atomic_to_page(ptes[j]) < PAGE_OFFSET);
-#ifndef CONFIG_DISCONTIGMEM
-		BUG_ON((kmap_atomic_to_page(ptes[j]) - mem_map) >= max_mapnr);
-#endif
-		BUG_ON(check_rt_addr(kmap_atomic_to_page(ptes[j])));
-		BUG_ON(ptep_to_address(ptes[j]) >= MM_VM_SIZE(vma->vm_mm));
-
 		for (k = j + 1; k < PAGE_MMUCOUNT; ++k) {
 			unsigned long vaddr;
@@ -1323,17 +1265,6 @@ static pte_t *reconstitute_ptes(pmd_t *p
 				ptes[k] = ptes[j] - (addr - vaddr)/MMUPAGE_SIZE;
 			else
 				ptes[k] = ptes[j] + (vaddr - addr)/MMUPAGE_SIZE;
-
-			BUG_ON((unsigned long)ptes[k] > (unsigned long)(-PAGE_SIZE));
-			BUG_ON((unsigned long)ptes[k] < PAGE_OFFSET);
-			BUG_ON(check_rt_addr(ptes[k]));
-			BUG_ON((unsigned long)kmap_atomic_to_page(ptes[k]) > (unsigned long)(-PAGE_SIZE));
-			BUG_ON((unsigned long)kmap_atomic_to_page(ptes[k]) < PAGE_OFFSET);
-#ifndef CONFIG_DISCONTIGMEM
-			BUG_ON((kmap_atomic_to_page(ptes[k]) - mem_map) >= max_mapnr);
-#endif
-			BUG_ON(check_rt_addr(kmap_atomic_to_page(ptes[k])));
-			BUG_ON(ptep_to_address(ptes[k]) >= MM_VM_SIZE(vma->vm_mm));
 		}
 		pr_debug("return 0x%p from reconstitute_ptes()\n", ptes[j]);
 		return ptes[j];
@@ -1348,17 +1279,6 @@ static int recheck_ptes(pte_t *ptes[], u
 		if (!ptes[k] || !pfns[k])
 			continue;

-		BUG_ON((unsigned long)ptes[k] > (unsigned long)(-PAGE_SIZE));
-		BUG_ON((unsigned long)ptes[k] < PAGE_OFFSET);
-		BUG_ON(check_rt_addr(ptes[k]));
-		BUG_ON((unsigned long)kmap_atomic_to_page(ptes[k]) > (unsigned long)(-PAGE_SIZE));
-		BUG_ON((unsigned long)kmap_atomic_to_page(ptes[k]) < PAGE_OFFSET);
-#ifndef CONFIG_DISCONTIGMEM
-		BUG_ON((kmap_atomic_to_page(ptes[k]) - mem_map) >= max_mapnr);
-#endif
-		BUG_ON(check_rt_addr(kmap_atomic_to_page(ptes[k])));
-		BUG_ON(ptep_to_address(ptes[k]) >= MM_VM_SIZE(vma->vm_mm));
-
 		if (pte_same(*ptes[k], ptevals[k]))
 			++rss;
 		else {
@@ -1390,70 +1310,29 @@ static struct pte_chain *move_mappings(s
 {
 	unsigned long k;

-	BUG_ON(!vma);
-	BUG_ON((unsigned long)vma > (unsigned long)(-PAGE_SIZE));
-	BUG_ON((unsigned long)vma < PAGE_OFFSET);
-	BUG_ON(check_rt_addr(vma));
-	BUG_ON(!new_page);
-	BUG_ON((unsigned long)new_page > (unsigned long)(-PAGE_SIZE));
-	BUG_ON((unsigned long)new_page < PAGE_OFFSET);
-	BUG_ON(check_rt_addr(new_page));
-#ifndef CONFIG_DISCONTIGMEM
-	BUG_ON((new_page - mem_map) >= max_mapnr);
-#endif
-
 	pr_debug("move_mappings()\n");

 	for (k = 0; k < PAGE_MMUCOUNT; ++k) {
 		struct page *page;
 		int release;
-		extern unsigned long max_pfn;

 		if (!ptes[k] || !pfns[k])
 			continue;

-		BUG_ON((unsigned long)ptes[k] > (unsigned long)(-PAGE_SIZE));
-		BUG_ON((unsigned long)ptes[k] < PAGE_OFFSET);
-		BUG_ON(check_rt_addr(ptes[k]));
-		BUG_ON((unsigned long)kmap_atomic_to_page(ptes[k]) > (unsigned long)(-PAGE_SIZE));
-		BUG_ON((unsigned long)kmap_atomic_to_page(ptes[k]) < PAGE_OFFSET);
-#ifndef CONFIG_DISCONTIGMEM
-		BUG_ON((kmap_atomic_to_page(ptes[k]) - mem_map) >= max_mapnr);
-#endif
-		BUG_ON(check_rt_addr(kmap_atomic_to_page(ptes[k])));
-		BUG_ON(ptep_to_address(ptes[k]) >= MM_VM_SIZE(vma->vm_mm));
-		BUG_ON(pfns[k] > max_pfn);
-
 		release = pte_present(*ptes[k]);
 		page = pfn_valid(pfns[k]) ? pfn_to_page(pfns[k]) : NULL;
 		if (page) {
-#ifndef CONFIG_DISCONTIGMEM
-			BUG_ON((page - mem_map) >= max_mapnr);
-#endif
-			BUG_ON(check_rt_addr(page));
-			if (PageReserved(page)) {
-				struct mm_struct *mm = ptep_to_mm(ptes[k]);
-				BUG_ON(!mm);
-				BUG_ON((unsigned long)mm > (unsigned long)(-PAGE_SIZE));
-				BUG_ON((unsigned long)mm < PAGE_OFFSET);
-				mm->rss++;
-			} else
+			if (PageReserved(page))
+				ptep_to_mm(ptes[k])->rss++;
+			else
 				page_remove_rmap(page, ptes[k]);
 		}
 		break_cow(vma, new_page, ptep_to_address(ptes[k]), ptes[k], k);
-		BUG_ON(!pte_chain);
-		BUG_ON((unsigned long)pte_chain > (unsigned long)(-PAGE_SIZE));
-		BUG_ON((unsigned long)pte_chain < PAGE_OFFSET);
-		BUG_ON(check_rt_addr(pte_chain));
 		pte_chain = page_add_rmap_chained(new_page,ptes[k],pte_chain);
 		/* nuke the pte's reference since we retargeted the pte */
 		if (page && release && !PageReserved(page))
 			page_cache_release(page);
 	}
-	if (pte_chain) {
-		BUG_ON((unsigned long)pte_chain > (unsigned long)(-PAGE_SIZE));
-		BUG_ON((unsigned long)pte_chain < PAGE_OFFSET);
-		BUG_ON(check_rt_addr(pte_chain));
-	}
+
 	pr_debug("return 0x%p from move_mappings()\n", pte_chain);
 	return pte_chain;
 }
diff -prauN pgcl-2.5.70-bk14-2/mm/rmap.c pgcl-2.5.70-bk14-3/mm/rmap.c
--- pgcl-2.5.70-bk14-2/mm/rmap.c 2003-06-09 15:24:19.000000000 -0700
+++ pgcl-2.5.70-bk14-3/mm/rmap.c 2003-06-10 00:33:32.000000000 -0700
@@ -170,20 +170,6 @@ page_add_rmap(struct page *page, pte_t *
 	pte_addr_t pte_paddr = ptep_to_paddr(ptep);
 	struct pte_chain *cur_pte_chain;

-	BUG_ON(!pte_chain);
-	BUG_ON((unsigned long)pte_chain < PAGE_OFFSET);
-	BUG_ON((unsigned long)pte_chain > (unsigned long)(-PAGE_SIZE));
-	BUG_ON(!ptep);
-	BUG_ON((unsigned long)ptep < PAGE_OFFSET);
-	BUG_ON((unsigned long)ptep > (unsigned long)(-PAGE_SIZE));
-	BUG_ON(pte_chain->next_and_idx);
-	BUG_ON(!page);
-	BUG_ON((unsigned long)page < PAGE_OFFSET);
-	BUG_ON((unsigned long)page > (unsigned long)(-PAGE_SIZE));
-#ifndef CONFIG_DISCONTIGMEM
-	BUG_ON((page - mem_map) >= max_mapnr);
-#endif
-
 	if (!pfn_valid(page_to_pfn(page)) || PageReserved(page))
 		return pte_chain;
@@ -267,17 +253,6 @@ void page_remove_rmap(struct page *page,
 {
 	pte_addr_t pte_paddr;
 	struct pte_chain *pc;
-	unsigned long kvaddr = (unsigned long)ptep;
-
-	BUG_ON(!kvaddr);
-	BUG_ON(kvaddr > (unsigned long)(-PAGE_SIZE));
-	BUG_ON(kvaddr < PAGE_OFFSET);
-	BUG_ON(!page);
-	BUG_ON((unsigned long)page > (unsigned long)(-PAGE_SIZE));
-	BUG_ON((unsigned long)page < PAGE_OFFSET);
-#ifndef CONFIG_DISCONTIGMEM
-	BUG_ON((page - mem_map) >= max_mapnr);
-#endif

 	if (!pfn_valid(page_to_pfn(page)) || PageReserved(page))
 		return;
@@ -549,14 +524,9 @@ void __pte_chain_free(struct pte_chain *
 	if (pte_chain->next_and_idx)
 		pte_chain->next_and_idx = 0;
 	pte_chainp = &per_cpu(local_pte_chain, cpu);
-	if (*pte_chainp) {
-		int k;
-		char *s = (char *)*pte_chainp;
-
-		for (k = 0; k < sizeof(**pte_chainp); ++k)
-			BUG_ON(s[k]);
+	if (*pte_chainp)
 		kmem_cache_free(pte_chain_cache, *pte_chainp);
-	}
+
 	*pte_chainp = pte_chain;
 	put_cpu();
 }
@@ -590,18 +560,7 @@ struct pte_chain *pte_chain_alloc(int gf
 		put_cpu();
 		ret = kmem_cache_alloc(pte_chain_cache, gfp_flags);
 	}
-	if (ret) {
-		char *s = (char *)ret;
-		int k;
-		for (k = 0; k < sizeof(*ret); ++k)
-			BUG_ON(s[k]);
-	}
-
-	if (ret) {
-		BUG_ON((unsigned long)ret < PAGE_OFFSET);
-		BUG_ON((unsigned long)ret > (unsigned long)(-PAGE_SIZE));
-	}
 	return ret;
 }
diff -prauN pgcl-2.5.70-bk14-2/net/ipv4/route.c pgcl-2.5.70-bk14-3/net/ipv4/route.c
--- pgcl-2.5.70-bk14-2/net/ipv4/route.c 2003-06-09 15:58:26.000000000 -0700
+++ pgcl-2.5.70-bk14-3/net/ipv4/route.c 2003-06-10 00:35:20.000000000 -0700
@@ -513,11 +513,6 @@ static void rt_run_flush(unsigned long d
 	for (i = rt_hash_mask; i >= 0; i--) {
 		spin_lock_bh(&rt_hash_table[i].lock);
 		rth = rt_hash_table[i].chain;
-		if (rth && (unsigned long)rth < PAGE_OFFSET) {
-			WARN_ON(1);
-			rth = NULL;
-			rt_hash_table[i].chain = NULL;
-		}
 		if (rth)
 			rt_hash_table[i].chain = NULL;
 		spin_unlock_bh(&rt_hash_table[i].lock);
@@ -648,12 +643,6 @@ static int rt_garbage_collect(void)
 		for (i = rt_hash_mask, k = rover; i >= 0; i--) {
 			unsigned long tmo = expire;

-			if (rt_hash_table[k].chain &&
-			    (unsigned long)rt_hash_table[k].chain < PAGE_OFFSET) {
-				WARN_ON(1);
-				rt_hash_table[k].chain = NULL;
-			}
-
 			k = (k + 1) & rt_hash_mask;
 			rthp = &rt_hash_table[k].chain;
 			spin_lock_bh(&rt_hash_table[k].lock);
@@ -733,11 +722,6 @@ static int rt_intern_hash(unsigned hash,
 	int attempts = !in_softirq();

 restart:
-	if (rt_hash_table[hash].chain &&
-	    (unsigned long)rt_hash_table[hash].chain < PAGE_OFFSET) {
-		WARN_ON(1);
-		rt_hash_table[hash].chain = NULL;
-	}
 	rthp = &rt_hash_table[hash].chain;

 	spin_lock_bh(&rt_hash_table[hash].lock);
@@ -818,7 +802,6 @@ restart:
 			printk("\n");
 		}
 #endif
-	BUG_ON(rt && (unsigned long)rt < PAGE_OFFSET);
 	rt_hash_table[hash].chain = rt;
 	spin_unlock_bh(&rt_hash_table[hash].lock);
 	*rp = rt;
@@ -889,11 +872,6 @@ static void rt_del(unsigned hash, struct
 	spin_lock_bh(&rt_hash_table[hash].lock);
 	ip_rt_put(rt);
-	if (rt_hash_table[hash].chain &&
-	    (unsigned long)rt_hash_table[hash].chain < PAGE_OFFSET) {
-		WARN_ON(1);
-		rt_hash_table[hash].chain = NULL;
-	}
 	for (rthp = &rt_hash_table[hash].chain; *rthp;
 	     rthp = &(*rthp)->u.rt_next)
 		if (*rthp == rt) {
@@ -938,11 +916,6 @@ void ip_rt_redirect(u32 old_gw, u32 dadd
 						skeys[i] ^ (ikeys[k] << 5),
 						tos);

-			if (rt_hash_table[hash].chain &&
-			    (unsigned long)rt_hash_table[hash].chain < PAGE_OFFSET) {
-				WARN_ON(1);
-				rt_hash_table[hash].chain = NULL;
-			}
 			rthp=&rt_hash_table[hash].chain;

 			rcu_read_lock();
@@ -1199,11 +1172,6 @@ unsigned short ip_rt_frag_needed(struct
 		unsigned hash = rt_hash_code(daddr, skeys[i], tos);

 		rcu_read_lock();
-		if (rt_hash_table[hash].chain &&
-		    (unsigned long)rt_hash_table[hash].chain < PAGE_OFFSET) {
-			WARN_ON(1);
-			rt_hash_table[hash].chain = NULL;
-		}
 		for (rth = rt_hash_table[hash].chain; rth;
 		     rth = rth->u.rt_next) {
 			smp_read_barrier_depends();
@@ -1800,11 +1768,6 @@ int ip_route_input(struct sk_buff *skb,
 	hash = rt_hash_code(daddr, saddr ^ (iif << 5), tos);

 	rcu_read_lock();
-	if (rt_hash_table[hash].chain &&
-	    (unsigned long)rt_hash_table[hash].chain < PAGE_OFFSET) {
-		WARN_ON(1);
-		rt_hash_table[hash].chain = NULL;
-	}
 	for (rth = rt_hash_table[hash].chain; rth; rth = rth->u.rt_next) {
 		smp_read_barrier_depends();
 		if (rth->fl.fl4_dst == daddr &&
@@ -2171,11 +2134,6 @@ int __ip_route_output_key(struct rtable
 	hash = rt_hash_code(flp->fl4_dst, flp->fl4_src ^ (flp->oif << 5), flp->fl4_tos);

 	rcu_read_lock();
-	if (rt_hash_table[hash].chain &&
-	    (unsigned long)rt_hash_table[hash].chain < PAGE_OFFSET) {
-		WARN_ON(1);
-		rt_hash_table[hash].chain = NULL;
-	}
 	for (rth = rt_hash_table[hash].chain; rth; rth = rth->u.rt_next) {
 		smp_read_barrier_depends();
 		if (rth->fl.fl4_dst == flp->fl4_dst &&
@@ -2407,11 +2365,6 @@ int ip_rt_dump(struct sk_buff *skb, str
 		if (h > s_h)
 			s_idx = 0;
 		rcu_read_lock();
-		if (rt_hash_table[h].chain &&
-		    (unsigned long)rt_hash_table[h].chain < PAGE_OFFSET) {
-			WARN_ON(1);
-			rt_hash_table[h].chain = NULL;
-		}
 		for (rt = rt_hash_table[h].chain, idx = 0; rt;
 		     rt = rt->u.rt_next, idx++) {
 			smp_read_barrier_depends();
@@ -2683,31 +2636,6 @@ static int ip_rt_acct_read(char *buffer,
 #endif /* CONFIG_PROC_FS */
 #endif /* CONFIG_NET_CLS_ROUTE */

-void check_rt_hash(void)
-{
-	int k;
-	rcu_read_lock();
-	for (k = 0; k <= rt_hash_mask; ++k) {
-		if (!rt_hash_table[k].chain)
-			continue;
-		if ((unsigned long)rt_hash_table[k].chain > PAGE_OFFSET
-		    && (unsigned long)rt_hash_table[k].chain < (unsigned long)(-PAGE_SIZE))
-			continue;
-		dump_stack();
-		rt_hash_table[k].chain = NULL;
-	}
-	rcu_read_unlock();
-}
-
-int check_rt_addr(void *addr)
-{
-	unsigned long vaddr = (unsigned long)addr;
-	unsigned long hash = (unsigned long)((void *)&rt_hash_table[0]);
-	if (vaddr < hash)
-		return 0;
-	return vaddr - hash <= sizeof(struct rt_hash_bucket)*rt_hash_mask;
-}
-
 int __init ip_rt_init(void)
 {
 	int i, order, goal, rc = 0;
@@ -2759,8 +2687,6 @@ int __init ip_rt_init(void)
 	rt_hash_mask--;
-	WARN_ON(sizeof(struct rt_hash_bucket)*rt_hash_mask>=(PAGE_SIZE<
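
For reference, nearly all of the checks stripped out above are instances of the
same pattern: a sanity test that a pointer lies inside the kernel's directly
mapped virtual range, between PAGE_OFFSET and the last addressable page at
-PAGE_SIZE. A minimal illustrative sketch of that test follows; the helper name
is hypothetical and is not part of the patch or the kernel:

	#include <asm/page.h>	/* PAGE_OFFSET, PAGE_SIZE */

	/*
	 * Illustrative only: the range test the removed BUG_ON()s were
	 * open-coding at each call site.
	 */
	static inline int kvaddr_plausible(const void *p)
	{
		unsigned long addr = (unsigned long)p;

		/* a mapped kernel pointer satisfies PAGE_OFFSET <= addr <= -PAGE_SIZE */
		return addr >= PAGE_OFFSET && addr <= (unsigned long)(-PAGE_SIZE);
	}

The removed debugging code repeated this pair of comparisons (together with the
check_rt_addr() and mem_map bounds checks) before nearly every pte and page
dereference; the Makefile hunk drops the -g flag that accompanied that
debugging.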