diff -urN rest-ref/arch/alpha/kernel/traps.c rest/arch/alpha/kernel/traps.c
--- rest-ref/arch/alpha/kernel/traps.c	Tue Jan 22 18:55:42 2002
+++ rest/arch/alpha/kernel/traps.c	Fri Apr 26 07:52:17 2002
@@ -167,6 +167,11 @@
 	dik_show_trace(sp);
 }
 
+void dump_stack(void)
+{
+	show_stack(NULL);
+}
+
 void
 die_if_kernel(char * str, struct pt_regs *regs, long err, unsigned long *r9_15)
 {
diff -urN rest-ref/fs/buffer.c rest/fs/buffer.c
--- rest-ref/fs/buffer.c	Fri Apr 26 07:51:56 2002
+++ rest/fs/buffer.c	Fri Apr 26 07:52:17 2002
@@ -47,7 +47,6 @@
 #include
 #include
 #include
-#include
 #include
 #include
 
@@ -3014,16 +3013,6 @@
 
 	complete((struct completion *)startup);
 
-	/*
-	 * FIXME: The ndirty logic here is wrong.  It's supposed to
-	 * send bdflush back to sleep after writing ndirty buffers.
-	 * In fact, the test is wrong so bdflush will in fact
-	 * sleep when bdflush_stop() returns true.
-	 *
-	 * FIXME: If it proves useful to implement ndirty properly,
-	 * then perhaps the value of ndirty should be scaled by the
-	 * amount of memory in the machine.
-	 */
 	for (;;) {
 		int ndirty = bdf_prm.b_un.ndirty;
 
diff -urN rest-ref/include/linux/mm.h rest/include/linux/mm.h
--- rest-ref/include/linux/mm.h	Fri Apr 26 07:51:56 2002
+++ rest/include/linux/mm.h	Fri Apr 26 07:52:17 2002
@@ -168,9 +168,8 @@
  * we can simply calculate the virtual address. On machines with
  * highmem some memory is mapped into kernel virtual memory
  * dynamically, so we need a place to store that address.
- * Note that this field could be 16 bits on x86 ... ;)
  *
- * Architectures with slow multiplication can define
+ * Architectures with slow ALU can define
  * WANT_PAGE_VIRTUAL in asm/page.h
  */
 #if defined(CONFIG_HIGHMEM) || defined(WANT_PAGE_VIRTUAL)
@@ -311,6 +310,7 @@
 #define TryLockPage(page)	test_and_set_bit(PG_locked, &(page)->flags)
 #define PageChecked(page)	test_bit(PG_checked, &(page)->flags)
 #define SetPageChecked(page)	set_bit(PG_checked, &(page)->flags)
+
 #define PageLaunder(page)	test_bit(PG_launder, &(page)->flags)
 #define SetPageLaunder(page)	set_bit(PG_launder, &(page)->flags)
 #define ClearPageLaunder(page)	clear_bit(PG_launder, &(page)->flags)
@@ -348,24 +348,18 @@
 	do {						\
 		(page)->virtual = (address);		\
 	} while(0)
-
-#else /* CONFIG_HIGHMEM || WANT_PAGE_VIRTUAL */
-#define set_page_address(page, address)  do { } while(0)
-#endif /* CONFIG_HIGHMEM || WANT_PAGE_VIRTUAL */
-
-/*
- * Permanent address of a page. Obviously must never be
- * called on a highmem page.
- */
-#if defined(CONFIG_HIGHMEM) || defined(WANT_PAGE_VIRTUAL)
-
 #define page_address(page)	((page)->virtual)
 
 #else /* CONFIG_HIGHMEM || WANT_PAGE_VIRTUAL */
+#define set_page_address(page, address)  do { } while(0)
+#ifdef CONFIG_DISCONTIGMEM
 #define page_address(page)						\
 	__va( (((page) - page_zone(page)->zone_mem_map) << PAGE_SHIFT)	\
 			+ page_zone(page)->zone_start_paddr)
+#else
+#define page_address(page)	__va((page - mem_map) << PAGE_SHIFT)
+#endif
 
 #endif /* CONFIG_HIGHMEM || WANT_PAGE_VIRTUAL */
 
@@ -464,6 +458,8 @@
 #define __free_page(page) __free_pages((page), 0)
 #define free_page(addr) free_pages((addr),0)
 
+extern int start_aggressive_readahead(unsigned int);
+
 extern void show_free_areas(void);
 extern void show_free_areas_node(pg_data_t *pgdat);
 
@@ -528,8 +524,8 @@
 	return page_count(page) - !!page->buffers == 1;
 }
 
-extern int can_share_swap_page(struct page *);
-extern int remove_exclusive_swap_page(struct page *);
+extern int FASTCALL(make_exclusive_page(struct page *, int));
+extern int FASTCALL(remove_exclusive_swap_page(struct page *));
 
 extern void __free_pte(pte_t);
diff -urN rest-ref/include/linux/mmzone.h rest/include/linux/mmzone.h
--- rest-ref/include/linux/mmzone.h	Fri Apr 26 07:51:56 2002
+++ rest/include/linux/mmzone.h	Fri Apr 26 07:52:17 2002
@@ -19,6 +19,11 @@
 #define MAX_ORDER CONFIG_FORCE_MAX_ZONEORDER
 #endif
 
+#define ZONE_DMA		0
+#define ZONE_NORMAL		1
+#define ZONE_HIGHMEM		2
+#define MAX_NR_ZONES		3
+
 typedef struct free_area_struct {
 	struct list_head	free_list;
 	unsigned long		*map;
@@ -26,6 +31,10 @@
 
 struct pglist_data;
 
+typedef struct zone_watermarks_s {
+	unsigned long min, low, high;
+} zone_watermarks_t;
+
 /*
  * On machines where it is needed (eg PCs) we divide physical memory
  * into multiple physical zones. On a PC we have 3 zones:
@@ -40,7 +49,15 @@
 	 */
 	spinlock_t		lock;
 	unsigned long		free_pages;
-	unsigned long		pages_min, pages_low, pages_high;
+
+	/*
+	 * We don't know whether the memory we are going to allocate will be
+	 * freeable or whether it will eventually be released, so to avoid
+	 * wasting several GB of RAM we must reserve some of the lower zone
+	 * memory (otherwise we risk running OOM on the lower zones even
+	 * though there is plenty of freeable RAM on the higher zones).
+	 */
+	zone_watermarks_t	watermarks[MAX_NR_ZONES];
 
 	/*
 	 * The below fields are protected by different locks (or by
@@ -60,35 +77,6 @@
 	free_area_t		free_area[MAX_ORDER];
 
 	/*
-	 * wait_table		-- the array holding the hash table
-	 * wait_table_size	-- the size of the hash table array
-	 * wait_table_shift	-- wait_table_size
-	 *				== BITS_PER_LONG (1 << wait_table_bits)
-	 *
-	 * The purpose of all these is to keep track of the people
-	 * waiting for a page to become available and make them
-	 * runnable again when possible. The trouble is that this
-	 * consumes a lot of space, especially when so few things
-	 * wait on pages at a given time. So instead of using
-	 * per-page waitqueues, we use a waitqueue hash table.
-	 *
-	 * The bucket discipline is to sleep on the same queue when
-	 * colliding and wake all in that wait queue when removing.
-	 * When something wakes, it must check to be sure its page is
-	 * truly available, a la thundering herd. The cost of a
-	 * collision is great, but given the expected load of the
-	 * table, they should be so rare as to be outweighed by the
-	 * benefits from the saved space.
-	 *
-	 * __wait_on_page() and unlock_page() in mm/filemap.c, are the
-	 * primary users of these fields, and in mm/page_alloc.c
-	 * free_area_init_core() performs the initialization of them.
-	 */
-	wait_queue_head_t	* wait_table;
-	unsigned long		wait_table_size;
-	unsigned long		wait_table_shift;
-
-	/*
 	 * Discontig memory support fields.
 	 */
 	struct pglist_data	*zone_pgdat;
@@ -101,13 +89,9 @@
 	 */
 	char			*name;
 	unsigned long		size;
+	unsigned long		realsize;
 } zone_t;
 
-#define ZONE_DMA		0
-#define ZONE_NORMAL		1
-#define ZONE_HIGHMEM		2
-#define MAX_NR_ZONES		3
-
 /*
  * One allocation request operates on a zonelist. A zonelist
  * is a list of zones, the first one is the 'goal' of the
@@ -125,6 +109,32 @@
 
 #define GFP_ZONEMASK	0x0f
 
+typedef struct wait_table_s {
+	/*
+	 * The purpose of all these is to keep track of the people
+	 * waiting for a page to become available and make them
+	 * runnable again when possible. The trouble is that this
+	 * consumes a lot of space, especially when so few things
+	 * wait on pages at a given time. So instead of using
+	 * per-page waitqueues, we use a waitqueue hash table.
+	 *
+	 * The bucket discipline is to sleep on the same queue when
+	 * colliding and wake all in that wait queue when removing.
+	 * When something wakes, it must check to be sure its page is
+	 * truly available, a la thundering herd. The cost of a
+	 * collision is great, but given the expected load of the
+	 * table, they should be so rare as to be outweighed by the
+	 * benefits from the saved space.
+	 *
+	 * __wait_on_page() and unlock_page() in mm/filemap.c, are the
+	 * primary users of these fields, and in mm/page_alloc.c
+	 * free_area_init_core() performs the initialization of them.
+	 */
+	wait_queue_head_t	* head;
+	unsigned long		shift;
+	unsigned long		size;
+} wait_table_t;
+
 /*
  * The pg_data_t structure is used in machines with CONFIG_DISCONTIGMEM
  * (mostly NUMA machines?) to denote a higher-level memory zone than the
@@ -148,14 +158,15 @@
 	unsigned long node_start_mapnr;
 	unsigned long node_size;
 	int node_id;
+	wait_table_t wait_table;
 	struct pglist_data *node_next;
 } pg_data_t;
 
 extern int numnodes;
 extern pg_data_t *pgdat_list;
 
-#define memclass(pgzone, classzone) (((pgzone)->zone_pgdat == (classzone)->zone_pgdat) \
-			&& ((pgzone) <= (classzone)))
+#define zone_idx(zone)			((zone) - (zone)->zone_pgdat->node_zones)
+#define memclass(pgzone, classzone)	(zone_idx(pgzone) <= zone_idx(classzone))
 
 /*
  * The following two are not meant for general usage. They are here as
diff -urN rest-ref/include/linux/sched.h rest/include/linux/sched.h
--- rest-ref/include/linux/sched.h	Fri Apr 26 07:51:56 2002
+++ rest/include/linux/sched.h	Fri Apr 26 07:52:17 2002
@@ -286,13 +286,9 @@
 
 struct zone_struct;
 
-/*
- * Used when a task if trying to free some pages for its own
- * use - to prevent other tasks/CPUs from stealing the just-freed
- * pages.
- */
-struct local_page {
-	struct page *page;
+struct local_pages {
+	struct list_head list;
+	unsigned int order, nr;
 	struct zone_struct * classzone;
 };
 
@@ -341,7 +337,7 @@
 	struct task_struct *next_task, *prev_task;
 	struct mm_struct *active_mm;
 
-	struct local_page local_page;
+	struct local_pages local_pages;
 
 /* task state */
 	struct linux_binfmt *binfmt;
diff -urN rest-ref/include/linux/sysctl.h rest/include/linux/sysctl.h
--- rest-ref/include/linux/sysctl.h	Fri Apr 26 07:51:56 2002
+++ rest/include/linux/sysctl.h	Fri Apr 26 07:52:17 2002
@@ -143,12 +143,13 @@
 	VM_MAX_MAP_COUNT=11,	/* int: Maximum number of active map areas */
 	VM_MIN_READAHEAD=12,	/* Min file readahead */
 	VM_MAX_READAHEAD=13,	/* Max file readahead */
-	VM_VFS_SCAN_RATIO=14,	/* part of the inactive vfs lists to scan */
-	VM_LRU_BALANCE_RATIO=15,/* balance active and inactive caches */
-	VM_PASSES=16,		/* number of vm passes before failing */
-	VM_GFP_DEBUG=17,	/* debug GFP failures */
-	VM_CACHE_SCAN_RATIO=18,	/* part of the inactive cache list to scan */
-	VM_MAPPED_RATIO=19,	/* amount of unfreeable pages that triggers swapout */
+	VM_HEAP_STACK_GAP=14,	/* int: page gap between heap and stack */
+	VM_VFS_SCAN_RATIO=15,	/* part of the inactive vfs lists to scan */
+	VM_LRU_BALANCE_RATIO=16,/* balance active and inactive caches */
+	VM_PASSES=17,		/* number of vm passes before failing */
+	VM_GFP_DEBUG=18,	/* debug GFP failures */
+	VM_CACHE_SCAN_RATIO=19,	/* part of the inactive cache list to scan */
+	VM_MAPPED_RATIO=20,	/* amount of unfreeable pages that triggers swapout */
 };
 
diff -urN rest-ref/kernel/fork.c rest/kernel/fork.c
--- rest-ref/kernel/fork.c	Fri Apr 26 07:51:56 2002
+++ rest/kernel/fork.c	Fri Apr 26 07:52:17 2002
@@ -698,7 +698,7 @@
 	p->lock_depth = -1;		/* -1 = no lock */
 	p->start_time = jiffies;
 
-	p->local_page.page = NULL;
+	INIT_LIST_HEAD(&p->local_pages.list);
 
 	retval = -ENOMEM;
 	/* copy all the process information */
diff -urN rest-ref/kernel/ksyms.c rest/kernel/ksyms.c
--- rest-ref/kernel/ksyms.c	Fri Apr 26 07:51:56 2002
+++ rest/kernel/ksyms.c	Fri Apr 26 07:52:17 2002
@@ -90,6 +90,7 @@
 EXPORT_SYMBOL(exit_sighand);
 
 /* internal kernel memory management */
+EXPORT_SYMBOL(start_aggressive_readahead);
 EXPORT_SYMBOL(_alloc_pages);
 EXPORT_SYMBOL(__alloc_pages);
 EXPORT_SYMBOL(alloc_pages_node);
diff -urN rest-ref/mm/filemap.c rest/mm/filemap.c
--- rest-ref/mm/filemap.c	Fri Apr 26 07:52:03 2002
+++ rest/mm/filemap.c	Fri Apr 26 07:52:17 2002
@@ -739,25 +739,14 @@
 	return 0;
 }
 
-/*
- * Knuth recommends primes in approximately golden ratio to the maximum
- * integer representable by a machine word for multiplicative hashing.
- * Chuck Lever verified the effectiveness of this technique:
- * http://www.citi.umich.edu/techreports/reports/citi-tr-00-1.pdf
- *
- * These primes are chosen to be bit-sparse, that is operations on
- * them can use shifts and additions instead of multiplications for
- * machines where multiplications are slow.
- */
-#if BITS_PER_LONG == 32
-/* 2^31 + 2^29 - 2^25 + 2^22 - 2^19 - 2^16 + 1 */
-#define GOLDEN_RATIO_PRIME 0x9e370001UL
-#elif BITS_PER_LONG == 64
-/* 2^63 + 2^61 - 2^57 + 2^54 - 2^51 - 2^18 + 1 */
-#define GOLDEN_RATIO_PRIME 0x9e37fffffffc0001UL
-#else
-#error Define GOLDEN_RATIO_PRIME for your wordsize.
-#endif
+static inline wait_queue_head_t * wait_table_hashfn(struct page * page, wait_table_t * wait_table)
+{
+#define i (((unsigned long) page)/(sizeof(struct page) & ~ (sizeof(struct page) - 1)))
+#define s(x) ((x)+((x)>>wait_table->shift))
+	return wait_table->head + (s(i) & (wait_table->size-1));
+#undef i
+#undef s
+}
 
 /*
  * In order to wait for pages to become available there must be
@@ -769,35 +758,10 @@
  * at a cost of "thundering herd" phenomena during rare hash
  * collisions.
  */
-static inline wait_queue_head_t *page_waitqueue(struct page *page)
+static inline wait_queue_head_t * page_waitqueue(struct page *page)
 {
-	const zone_t *zone = page_zone(page);
-	wait_queue_head_t *wait = zone->wait_table;
-	unsigned long hash = (unsigned long)page;
-
-#if BITS_PER_LONG == 64
-	/* Sigh, gcc can't optimise this alone like it does for 32 bits. */
-	unsigned long n = hash;
-	n <<= 18;
-	hash -= n;
-	n <<= 33;
-	hash -= n;
-	n <<= 3;
-	hash += n;
-	n <<= 3;
-	hash -= n;
-	n <<= 4;
-	hash += n;
-	n <<= 2;
-	hash += n;
-#else
-	/* On some cpus multiply is faster, on others gcc will do shifts */
-	hash *= GOLDEN_RATIO_PRIME;
-#endif
-
-	hash >>= zone->wait_table_shift;
-
-	return &wait[hash];
+	pg_data_t * pgdat = page_zone(page)->zone_pgdat;
+	return wait_table_hashfn(page, &pgdat->wait_table);
 }
 
 /*
@@ -837,7 +801,7 @@
 		BUG();
 	smp_mb__after_clear_bit();
 	if (waitqueue_active(waitqueue))
-		wake_up_all(waitqueue);
+		wake_up(waitqueue);
 }
 
 /*
@@ -850,7 +814,7 @@
 	struct task_struct *tsk = current;
 	DECLARE_WAITQUEUE(wait, tsk);
 
-	add_wait_queue_exclusive(waitqueue, &wait);
+	add_wait_queue(waitqueue, &wait);
 	for (;;) {
 		set_task_state(tsk, TASK_UNINTERRUPTIBLE);
 		if (PageLocked(page)) {
diff -urN rest-ref/mm/memory.c rest/mm/memory.c
--- rest-ref/mm/memory.c	Fri Apr 26 07:51:42 2002
+++ rest/mm/memory.c	Fri Apr 26 07:52:17 2002
@@ -965,15 +965,11 @@
 	if (!VALID_PAGE(old_page))
 		goto bad_wp_page;
 
-	if (!TryLockPage(old_page)) {
-		int reuse = can_share_swap_page(old_page);
-		unlock_page(old_page);
-		if (reuse) {
-			flush_cache_page(vma, address);
-			establish_pte(vma, address, page_table, pte_mkyoung(pte_mkdirty(pte_mkwrite(pte))));
-			spin_unlock(&mm->page_table_lock);
-			return 1;	/* Minor fault */
-		}
+	if (make_exclusive_page(old_page, 1)) {
+		flush_cache_page(vma, address);
+		establish_pte(vma, address, page_table, pte_mkyoung(pte_mkdirty(pte_mkwrite(pte))));
+		spin_unlock(&mm->page_table_lock);
+		return 1;	/* Minor fault */
 	}
 
 	/*
@@ -991,6 +987,19 @@
 	 * Re-check the pte - we dropped the lock
 	 */
 	spin_lock(&mm->page_table_lock);
+	/*
+	 * Keep the page pinned until we return runnable, to avoid
+	 * letting another thread skip the break_cow path; this way
+	 * the pte_same check below also implies that the _contents_
+	 * of the old_page didn't change under us (not only that the
+	 * pagetable is the same).
+	 *
+	 * Since we have the page_table_lock acquired here, if the
+	 * pte is the same it means we're still holding an additional
+	 * reference on the old_page so we can safely
+	 * page_cache_release(old_page) before the "pte_same == true" path.
+	 */
+	page_cache_release(old_page);
 	if (pte_same(*page_table, pte)) {
 		if (PageReserved(old_page))
 			++mm->rss;
@@ -1002,7 +1011,6 @@
 	}
 	spin_unlock(&mm->page_table_lock);
 	page_cache_release(new_page);
-	page_cache_release(old_page);
 	return 1;	/* Minor fault */
 
 bad_wp_page:
@@ -1155,9 +1163,8 @@
 		ret = 2;
 	}
 
-	mark_page_accessed(page);
-
-	lock_page(page);
+	if (!Page_Uptodate(page))
+		wait_on_page(page);
 
 	/*
 	 * Back out if somebody else faulted in this pte while we
@@ -1166,7 +1173,6 @@
 	spin_lock(&mm->page_table_lock);
 	if (!pte_same(*page_table, orig_pte)) {
 		spin_unlock(&mm->page_table_lock);
-		unlock_page(page);
 		page_cache_release(page);
 		return 1;
 	}
@@ -1174,14 +1180,15 @@
 
 	/* The page isn't present yet, go ahead with the fault. */
 	swap_free(entry);
-	if (vm_swap_full())
-		remove_exclusive_swap_page(page);
-
 	mm->rss++;
 	pte = mk_pte(page, vma->vm_page_prot);
-	if (write_access && can_share_swap_page(page))
-		pte = pte_mkdirty(pte_mkwrite(pte));
-	unlock_page(page);
+	if (make_exclusive_page(page, write_access)) {
+		if (write_access)
+			pte = pte_mkdirty(pte);
+		if (vma->vm_flags & VM_WRITE)
+			pte = pte_mkwrite(pte);
+	}
+	mark_page_accessed(page);
 
 	flush_page_to_ram(page);
 	flush_icache_page(vma, page);
@@ -1219,15 +1226,14 @@
 
 		spin_lock(&mm->page_table_lock);
 		if (!pte_none(*page_table)) {
-			page_cache_release(page);
 			spin_unlock(&mm->page_table_lock);
+			page_cache_release(page);
 			return 1;
 		}
 		mm->rss++;
 		flush_page_to_ram(page);
 		entry = pte_mkwrite(pte_mkdirty(mk_pte(page, vma->vm_page_prot)));
 		lru_cache_add(page);
-		mark_page_accessed(page);
 	}
 
 	set_pte(page_table, entry);
@@ -1306,9 +1312,9 @@
 			entry = pte_mkwrite(pte_mkdirty(entry));
 		set_pte(page_table, entry);
 	} else {
+		spin_unlock(&mm->page_table_lock);
 		/* One of our sibling threads was faster, back out. */
 		page_cache_release(new_page);
-		spin_unlock(&mm->page_table_lock);
 		return 1;
 	}
 
diff -urN rest-ref/mm/page_alloc.c rest/mm/page_alloc.c
--- rest-ref/mm/page_alloc.c	Fri Apr 26 07:52:03 2002
+++ rest/mm/page_alloc.c	Fri Apr 26 07:54:02 2002
@@ -38,6 +38,7 @@
 static int zone_balance_ratio[MAX_NR_ZONES] __initdata = { 128, 128, 128, };
 static int zone_balance_min[MAX_NR_ZONES] __initdata = { 20 , 20, 20, };
 static int zone_balance_max[MAX_NR_ZONES] __initdata = { 255 , 255, 255, };
+static int lower_zone_reserve_ratio[MAX_NR_ZONES-1] = { 256, 32 };
 
 int vm_gfp_debug = 0;
 
@@ -124,7 +125,7 @@
 		BUG();
 	page->flags &= ~((1<flags & PF_FREE_PAGES)
+	if (current->flags & PF_FREE_PAGES)
 		goto local_freelist;
  back_local_freelist:
 
@@ -177,12 +178,14 @@
 	return;
 
  local_freelist:
-	if ((current->local_page.page) ||
-	    !memclass(page_zone(page), current->local_page.classzone) ||
+	if ((current->local_pages.nr && !current->local_pages.order) ||
+	    !memclass(page_zone(page), current->local_pages.classzone) ||
 	    in_interrupt())
 		goto back_local_freelist;
 
-	current->local_page.page = page;
+	list_add(&page->list, &current->local_pages.list);
+	page->index = order;
+	current->local_pages.nr++;
 }
 
 #define MARK_USED(index, order, area) \
@@ -267,75 +270,101 @@
 static struct page * balance_classzone(zone_t * classzone, unsigned int gfp_mask, unsigned int order, int * freed)
 {
 	struct page * page = NULL;
-	int __freed = 0;
+	int __freed;
 
 	if (in_interrupt())
 		BUG();
 
-	if (current->local_page.page)
-		BUG();
-	current->local_page.classzone = classzone;
+	current->local_pages.order = order;
+	current->local_pages.classzone = classzone;
 	current->flags |= PF_MEMALLOC | PF_FREE_PAGES;
 	__freed = try_to_free_pages(classzone, gfp_mask, order);
 	current->flags &= ~(PF_MEMALLOC | PF_FREE_PAGES);
 
-	if (current->local_page.page) {
-		page = current->local_page.page;
-		current->local_page.page = NULL;
-
-		if (order != 0) {
-			/* The local page won't suit */
-			__free_pages_ok(page, 0);
-			page = NULL;
-			goto out;
+	if (current->local_pages.nr) {
+		struct list_head * entry, * local_pages;
+		struct page * tmp;
+		int nr_pages;
+
+		local_pages = &current->local_pages.list;
+
+		if (likely(__freed)) {
+			/* pick from the last inserted so we're lifo */
+			entry = local_pages->next;
+			do {
+				tmp = list_entry(entry, struct page, list);
+				if (!memclass(page_zone(tmp), classzone))
+					BUG();
+				if (tmp->index == order) {
+					list_del(entry);
+					current->local_pages.nr--;
+					set_page_count(tmp, 1);
+					page = tmp;
+
+					if (page->buffers)
+						BUG();
+					if (page->mapping)
+						BUG();
+					if (!VALID_PAGE(page))
+						BUG();
+					if (PageSwapCache(page))
+						BUG();
+					if (PageLocked(page))
+						BUG();
+					if (PageLRU(page))
+						BUG();
+					if (PageActive(page))
+						BUG();
+					if (PageDirty(page))
+						BUG();
+
+					break;
+				}
+			} while ((entry = entry->next) != local_pages);
 		}
-		if (!memclass(page_zone(page), classzone))
-			BUG();
-		set_page_count(page, 1);
-		if (page->buffers)
-			BUG();
-		if (page->mapping)
-			BUG();
-		if (!VALID_PAGE(page))
-			BUG();
-		if (PageSwapCache(page))
-			BUG();
-		if (PageLocked(page))
-			BUG();
-		if (PageLRU(page))
-			BUG();
-		if (PageActive(page))
-			BUG();
-		if (PageDirty(page))
-			BUG();
+
+		nr_pages = current->local_pages.nr;
+		/* free in reverse order so that the global order will be lifo */
+		while ((entry = local_pages->prev) != local_pages) {
+			list_del(entry);
+			tmp = list_entry(entry, struct page, list);
+			__free_pages_ok(tmp, tmp->index);
+			if (!nr_pages--)
+				BUG();
+		}
+		current->local_pages.nr = 0;
 	}
-out:
 	*freed = __freed;
 	return page;
 }
 
+static inline unsigned long zone_free_pages(zone_t * zone, unsigned int order)
+{
+	long free = zone->free_pages - (1UL << order);
+	return free >= 0 ? free : 0;
+}
+
 /*
  * This is the 'heart' of the zoned buddy allocator:
  */
 struct page * __alloc_pages(unsigned int gfp_mask, unsigned int order, zonelist_t *zonelist)
 {
-	unsigned long min;
 	zone_t **zone, * classzone;
 	struct page * page;
-	int freed;
+	int freed, class_idx;
 
 	zone = zonelist->zones;
 	classzone = *zone;
-	min = 1UL << order;
+	class_idx = zone_idx(classzone);
+
 	for (;;) {
 		zone_t *z = *(zone++);
 		if (!z)
 			break;
 
-		min += z->pages_low;
-		if (z->free_pages > min) {
+		if (zone_free_pages(z, order) > z->watermarks[class_idx].low) {
 			page = rmqueue(z, order);
 			if (page)
 				return page;
@@ -348,18 +377,16 @@
 	wake_up_interruptible(&kswapd_wait);
 
 	zone = zonelist->zones;
-	min = 1UL << order;
 	for (;;) {
-		unsigned long local_min;
+		unsigned long min;
 		zone_t *z = *(zone++);
 		if (!z)
 			break;
 
-		local_min = z->pages_min;
+		min = z->watermarks[class_idx].min;
 		if (!(gfp_mask & __GFP_WAIT))
-			local_min >>= 2;
-		min += local_min;
-		if (z->free_pages > min) {
+			min >>= 2;
+		if (zone_free_pages(z, order) > min) {
 			page = rmqueue(z, order);
 			if (page)
 				return page;
@@ -368,8 +395,7 @@
 
 	/* here we're in the low on memory slow path */
 
-rebalance:
-	if (current->flags & PF_MEMALLOC) {
+	if (current->flags & PF_MEMALLOC && !in_interrupt()) {
 		zone = zonelist->zones;
 		for (;;) {
 			zone_t *z = *(zone++);
@@ -385,36 +411,51 @@
 
 	/* Atomic allocations - we can't balance anything */
 	if (!(gfp_mask & __GFP_WAIT))
-		return NULL;
+		goto out;
 
+ rebalance:
 	page = balance_classzone(classzone, gfp_mask, order, &freed);
 	if (page)
 		return page;
 
 	zone = zonelist->zones;
-	min = 1UL << order;
-	for (;;) {
-		zone_t *z = *(zone++);
-		if (!z)
-			break;
+	if (likely(freed)) {
+		for (;;) {
+			zone_t *z = *(zone++);
+			if (!z)
+				break;
 
-		min += z->pages_min;
-		if (z->free_pages > min) {
-			page = rmqueue(z, order);
-			if (page)
-				return page;
+			if (zone_free_pages(z, order) > z->watermarks[class_idx].min) {
+				page = rmqueue(z, order);
+				if (page)
+					return page;
+			}
 		}
-	}
+		goto rebalance;
+	} else {
+		/*
+		 * Check that no other task has been killed in the meanwhile;
+		 * in such a case we can succeed the allocation.
+		 */
+		for (;;) {
+			zone_t *z = *(zone++);
+			if (!z)
+				break;
 
-	/* Don't let big-order allocations loop */
-	if (order > 3)
-		return NULL;
+			if (zone_free_pages(z, order) > z->watermarks[class_idx].high) {
+				page = rmqueue(z, order);
+				if (page)
+					return page;
+			}
+		}
+	}
 
-	/* Yield for kswapd, and try again */
-	current->policy |= SCHED_YIELD;
-	__set_current_state(TASK_RUNNING);
-	schedule();
-	goto rebalance;
+ out:
+	printk(KERN_NOTICE "__alloc_pages: %u-order allocation failed (gfp=0x%x/%i)\n",
+	       order, gfp_mask, !!(current->flags & PF_MEMALLOC));
+	if (unlikely(vm_gfp_debug))
+		dump_stack();
+	return NULL;
 }
 
 /*
@@ -528,18 +569,25 @@
 {
 	pg_data_t *pgdat = pgdat_list;
 	unsigned int sum = 0;
+	zonelist_t *zonelist;
+	zone_t **zonep, *zone;
 
 	do {
-		zonelist_t *zonelist = pgdat->node_zonelists + (GFP_USER & GFP_ZONEMASK);
-		zone_t **zonep = zonelist->zones;
-		zone_t *zone;
+		int class_idx;
+		zonelist = pgdat->node_zonelists + (GFP_USER & GFP_ZONEMASK);
+		zonep = zonelist->zones;
+		zone = *zonep;
+		class_idx = zone_idx(zone);
 
-		for (zone = *zonep++; zone; zone = *zonep++) {
-			unsigned long size = zone->size;
-			unsigned long high = zone->pages_high;
-			if (size > high)
-				sum += size - high;
-		}
+		sum += zone->nr_cache_pages;
+		do {
+			unsigned int free = zone->free_pages - zone->watermarks[class_idx].high;
+			zonep++;
+			zone = *zonep;
+			if (free <= 0)
+				continue;
+			sum += free;
+		} while (zone);
 
 		pgdat = pgdat->node_next;
 	} while (pgdat);
@@ -561,6 +609,41 @@
 }
 #endif
 
+/*
+ * If it returns nonzero it means there's lots of ram "free"
+ * (note: not in cache!) so any caller will know that
+ * it can allocate some memory to do some more aggressive
+ * (possibly wasteful) readahead. The state of the memory
+ * should be rechecked after every few pages allocated for
+ * doing this aggressive readahead.
+ *
+ * The gfp_mask parameter specifies the kind of memory
+ * the readahead information will be allocated from.
+ */
+int start_aggressive_readahead(unsigned int gfp_mask)
+{
+	pg_data_t *pgdat = pgdat_list;
+	zonelist_t *zonelist;
+	zone_t **zonep, *zone;
+	int ret = 0;
+
+	do {
+		int class_idx;
+		zonelist = pgdat->node_zonelists + (gfp_mask & GFP_ZONEMASK);
+		zonep = zonelist->zones;
+		zone = *(zonep++);
+		class_idx = zone_idx(zone);
+
+		for (; zone; zone = *(zonep++))
+			if (zone->free_pages > zone->watermarks[class_idx].high * 2)
+				ret = 1;
+
+		pgdat = pgdat->node_next;
+	} while (pgdat);
+
+	return ret;
+}
+
 int try_to_free_pages_nozone(unsigned int gfp_mask)
 {
 	pg_data_t *pgdat = pgdat_list;
@@ -606,13 +689,9 @@
 		zone_t *zone;
 		for (zone = tmpdat->node_zones;
 			        zone < tmpdat->node_zones + MAX_NR_ZONES; zone++)
-			printk("Zone:%s freepages:%6lukB min:%6lukB low:%6lukB "
-			       "high:%6lukB\n",
+			printk("Zone:%s freepages:%6lukB\n",
 				zone->name,
-				K(zone->free_pages),
-				K(zone->pages_min),
-				K(zone->pages_low),
-				K(zone->pages_high));
+				K(zone->free_pages));
 		tmpdat = tmpdat->node_next;
 	}
 
@@ -719,33 +798,45 @@
  */
 
 #define PAGES_PER_WAITQUEUE	256
 
-static inline unsigned long wait_table_size(unsigned long pages)
+static inline unsigned long wait_table_size(unsigned long pages, unsigned long * shift)
 {
 	unsigned long size = 1;
+	unsigned long __shift = 0;
 
 	pages /= PAGES_PER_WAITQUEUE;
 
-	while (size < pages)
+	while (size < pages) {
 		size <<= 1;
+		__shift++;
+	}
 
 	/*
-	 * Once we have dozens or even hundreds of threads sleeping
-	 * on IO we've got bigger problems than wait queue collision.
-	 * Limit the size of the wait table to a reasonable size.
+	 * The usage pattern of the queues depends mostly on the I/O,
+	 * not much on the RAM size of the machine, so make sure the
+	 * array is large enough on lowmem nodes too.
 	 */
-	size = min(size, 4096UL);
+	size = max(size, 256UL);
+	*shift = max(__shift, 8UL);
 
 	return size;
 }
 
 /*
- * This is an integer logarithm so that shifts can be used later
- * to extract the more random high bits from the multiplicative
- * hash function before the remainder is taken.
+ * The per-node waitqueue mechanism uses hashed waitqueues
+ * per zone.
  */
-static inline unsigned long wait_table_bits(unsigned long size)
+static inline void wait_table_init(pg_data_t *pgdat)
 {
-	return ffz(~size);
+	unsigned long shift, size, i;
+
+	size = wait_table_size(pgdat->node_size, &shift);
+
+	pgdat->wait_table.size = size;
+	pgdat->wait_table.shift = shift;
+	pgdat->wait_table.head = (wait_queue_head_t *) alloc_bootmem_node(pgdat, size * sizeof(wait_queue_head_t));
+
+	for(i = 0; i < size; i++)
+		init_waitqueue_head(pgdat->wait_table.head + i);
 }
 
 #define LONG_ALIGN(x) (((x)+(sizeof(long))-1)&~((sizeof(long))-1))
@@ -799,11 +890,14 @@
 	pgdat->node_start_mapnr = (lmem_map - mem_map);
 	pgdat->nr_zones = 0;
 
+	wait_table_init(pgdat);
+
 	offset = lmem_map - mem_map;
 	for (j = 0; j < MAX_NR_ZONES; j++) {
 		zone_t *zone = pgdat->node_zones + j;
 		unsigned long mask;
 		unsigned long size, realsize;
+		int idx;
 
 		zone_table[nid * MAX_NR_ZONES + j] = zone;
 		realsize = size = zones_size[j];
@@ -812,28 +906,16 @@
 		printk("zone(%lu): %lu pages.\n", j, size);
 		zone->size = size;
+		zone->realsize = realsize;
 		zone->name = zone_names[j];
 		zone->lock = SPIN_LOCK_UNLOCKED;
 		zone->zone_pgdat = pgdat;
 		zone->free_pages = 0;
 		zone->need_balance = 0;
+		zone->nr_active_pages = zone->nr_inactive_pages = 0;
 		if (!size)
 			continue;
 
-		/*
-		 * The per-page waitqueue mechanism uses hashed waitqueues
-		 * per zone.
-		 */
-		zone->wait_table_size = wait_table_size(size);
-		zone->wait_table_shift =
-			BITS_PER_LONG - wait_table_bits(zone->wait_table_size);
-		zone->wait_table = (wait_queue_head_t *)
-			alloc_bootmem_node(pgdat, zone->wait_table_size
-						* sizeof(wait_queue_head_t));
-
-		for(i = 0; i < zone->wait_table_size; ++i)
-			init_waitqueue_head(zone->wait_table + i);
-
 		pgdat->nr_zones = j+1;
 
 		mask = (realsize / zone_balance_ratio[j]);
@@ -841,9 +923,29 @@
 			mask = zone_balance_min[j];
 		else if (mask > zone_balance_max[j])
 			mask = zone_balance_max[j];
-		zone->pages_min = mask;
-		zone->pages_low = mask*2;
-		zone->pages_high = mask*3;
+		zone->watermarks[j].min = mask;
+		zone->watermarks[j].low = mask*2;
+		zone->watermarks[j].high = mask*3;
+		/* now set the watermarks of the lower zones in the "j" classzone */
+		for (idx = j-1; idx >= 0; idx--) {
+			zone_t * lower_zone = pgdat->node_zones + idx;
+			unsigned long lower_zone_reserve;
+			if (!lower_zone->size)
+				continue;
+
+			mask = lower_zone->watermarks[idx].min;
+			lower_zone->watermarks[j].min = mask;
+			lower_zone->watermarks[j].low = mask*2;
+			lower_zone->watermarks[j].high = mask*3;
+
+			/* now the trickier part */
+			lower_zone_reserve = realsize / lower_zone_reserve_ratio[idx];
+			lower_zone->watermarks[j].min += lower_zone_reserve;
+			lower_zone->watermarks[j].low += lower_zone_reserve;
+			lower_zone->watermarks[j].high += lower_zone_reserve;
+
+			realsize += lower_zone->realsize;
+		}
 
 		zone->zone_mem_map = mem_map + offset;
 		zone->zone_start_mapnr = offset;
@@ -927,3 +1029,16 @@
 }
 
 __setup("memfrac=", setup_mem_frac);
+
+static int __init setup_lower_zone_reserve(char *str)
+{
+	int j = 0;
+
+	while (get_option(&str, &lower_zone_reserve_ratio[j++]) == 2);
+	printk("setup_lower_zone_reserve: ");
+	for (j = 0; j < MAX_NR_ZONES-1; j++) printk("%d ", lower_zone_reserve_ratio[j]);
+	printk("\n");
+	return 1;
+}
+
+__setup("lower_zone_reserve=", setup_lower_zone_reserve);
diff -urN rest-ref/mm/swapfile.c rest/mm/swapfile.c
--- rest-ref/mm/swapfile.c	Tue Apr 16 08:12:09 2002
+++ rest/mm/swapfile.c	Fri Apr 26 07:52:17 2002
@@ -226,6 +226,7 @@
  * Check if we're the only user of a swap page,
  * when the page is locked.
  */
+static int FASTCALL(exclusive_swap_page(struct page *page));
 static int exclusive_swap_page(struct page *page)
 {
 	int retval = 0;
@@ -239,12 +240,13 @@
 		if (p->swap_map[SWP_OFFSET(entry)] == 1) {
 			/* Recheck the page count with the pagecache lock held.. */
 			spin_lock(&pagecache_lock);
-			if (page_count(page) - !!page->buffers == 2)
+			if (PageSwapCache(page) && page_count(page) - !!page->buffers == 2)
 				retval = 1;
 			spin_unlock(&pagecache_lock);
 		}
 		swap_info_put(p);
 	}
+
 	return retval;
 }
 
@@ -256,21 +258,42 @@
  * work, but we opportunistically check whether
 * we need to get all the locks first..
 */
-int can_share_swap_page(struct page *page)
+int make_exclusive_page(struct page *page, int write)
 {
 	int retval = 0;
 
-	if (!PageLocked(page))
-		BUG();
 	switch (page_count(page)) {
 	case 3:
 		if (!page->buffers)
 			break;
 		/* Fallthrough */
 	case 2:
+		/* racy fastpath check */
 		if (!PageSwapCache(page))
 			break;
-		retval = exclusive_swap_page(page);
+
+		if ((!write && !vm_swap_full()) || TryLockPage(page)) {
+			/*
+			 * Don't remove the page from the swapcache if:
+			 * - it was a read fault and...
+			 * - the swap isn't full
+			 * or if
+			 * - we failed acquiring the page lock
+			 *
+			 * NOTE: if we failed to acquire the lock we cannot remove the
+			 * page from the swapcache, but we can still safely take over
+			 * the page if it's exclusive, see the swapcache check in
+			 * the innermost critical section of exclusive_swap_page().
+			 */
+			retval = exclusive_swap_page(page);
+		} else {
+			/*
+			 * Here we hold the page lock and we're asked
+			 * to try to drop this page from the swapcache.
+			 */
+			retval = remove_exclusive_swap_page(page);
+			unlock_page(page);
+		}
 		break;
 	case 1:
 		if (PageReserved(page))
@@ -299,7 +322,7 @@
 
 	entry.val = page->index;
 	p = swap_info_get(entry);
-	if (!p)
+	if (unlikely(!p))
 		return 0;
 
 	/* Is the only swap cache user the cache itself? */
@@ -308,7 +331,11 @@
 		/* Recheck the page count with the pagecache lock held.. */
 		spin_lock(&pagecache_lock);
 		if (page_count(page) - !!page->buffers == 2) {
+			if (page->buffers && !try_to_free_buffers(page, 0))
+				/* an anonymous page cannot have page->buffers set */
+				BUG();
 			__delete_from_swap_cache(page);
+			swap_entry_free(p, SWP_OFFSET(entry));
 			SetPageDirty(page);
 			retval = 1;
 		}
@@ -316,11 +343,8 @@
 	}
 	swap_info_put(p);
 
-	if (retval) {
-		block_flushpage(page, 0);
-		swap_free(entry);
+	if (retval)
 		page_cache_release(page);
-	}
 
 	return retval;
 }
@@ -342,11 +366,7 @@
 	}
 	if (page) {
 		page_cache_get(page);
-		/* Only cache user (+us), or swap space full? Free it! */
-		if (page_count(page) - !!page->buffers == 2 || vm_swap_full()) {
-			delete_from_swap_cache(page);
-			SetPageDirty(page);
-		}
+		remove_exclusive_swap_page(page);
 		UnlockPage(page);
 		page_cache_release(page);
 	}
diff -urN rest-ref/mm/vmscan.c rest/mm/vmscan.c
--- rest-ref/mm/vmscan.c	Fri Apr 26 07:52:03 2002
+++ rest/mm/vmscan.c	Fri Apr 26 07:52:17 2002
@@ -280,6 +280,7 @@
 {
 	unsigned long address;
 	struct vm_area_struct* vma;
+	int tlb_flush = 0;
 
 	/*
 	 * Find the proper vm-area after freezing the vma chain
@@ -294,6 +295,7 @@
 	}
 	vma = find_vma(mm, address);
 	if (vma) {
+		tlb_flush = 1;
 		if (address < vma->vm_start)
 			address = vma->vm_start;
 
@@ -312,6 +314,8 @@
 
 out_unlock:
 	spin_unlock(&mm->page_table_lock);
+	if (tlb_flush)
+		flush_tlb_mm(mm);
 	return count;
 }
 
@@ -729,11 +733,12 @@
 
 static int check_classzone_need_balance(zone_t * classzone)
 {
-	zone_t * first_classzone;
+	zone_t * first_zone;
+	int class_idx = zone_idx(classzone);
 
-	first_classzone = classzone->zone_pgdat->node_zones;
-	while (classzone >= first_classzone) {
-		if (classzone->free_pages > classzone->pages_high)
+	first_zone = classzone->zone_pgdat->node_zones;
+	while (classzone >= first_zone) {
+		if (classzone->free_pages > classzone->watermarks[class_idx].high)
 			return 0;
 		classzone--;
 	}
@@ -749,12 +754,12 @@
 		zone = pgdat->node_zones + i;
 		if (unlikely(current->need_resched))
 			schedule();
-		if (!zone->need_balance)
+		if (!zone->need_balance || !zone->size)
 			continue;
 		if (!try_to_free_pages(zone, GFP_KSWAPD, 0)) {
 			zone->need_balance = 0;
 			__set_current_state(TASK_INTERRUPTIBLE);
-			schedule_timeout(HZ);
+			schedule_timeout(HZ*5);
 			continue;
 		}
 		if (check_classzone_need_balance(zone))
@@ -787,7 +792,7 @@
 	for (i = pgdat->nr_zones-1; i >= 0; i--) {
 		zone = pgdat->node_zones + i;
-		if (!zone->need_balance)
+		if (!zone->need_balance || !zone->size)
 			continue;
 
 		return 0;
 	}
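
Illustration (not part of the patch): the core idea of the mmzone.h/page_alloc.c changes above is that each zone no longer has a single pages_min/low/high triple, but one watermark set per classzone, with a lower_zone_reserve added so that ZONE_DMA/ZONE_NORMAL are not exhausted by allocations that could have been satisfied from higher zones. The stand-alone C sketch below mirrors zone_free_pages() and the free_area_init_core() watermark setup under stated assumptions: the zone sizes, free-page counts, the fixed balance ratio of 128 and the clamp to 20..255 are made-up stand-ins for zone_balance_ratio/min/max, and the struct names are local to the sketch, not kernel types.

#include <stdio.h>

#define MAX_NR_ZONES 3

struct watermarks { unsigned long min, low, high; };

struct zone {
	const char *name;
	unsigned long realsize;			/* pages managed by the zone */
	unsigned long free_pages;
	struct watermarks watermarks[MAX_NR_ZONES];	/* one set per classzone */
};

/* mirrors zone_free_pages(): free pages left if a 2^order block were taken */
static unsigned long zone_free_pages(const struct zone *z, unsigned int order)
{
	long free = (long)z->free_pages - (long)(1UL << order);
	return free >= 0 ? (unsigned long)free : 0;
}

int main(void)
{
	/* hypothetical node: DMA, Normal, HighMem zones with invented sizes */
	struct zone zones[MAX_NR_ZONES] = {
		{ "DMA",      4096,   900 },
		{ "Normal",  61440,  3000 },
		{ "HighMem", 196608, 50000 },
	};
	static const int lower_zone_reserve_ratio[MAX_NR_ZONES - 1] = { 256, 32 };
	int j, idx;

	/* same scheme as free_area_init_core(): per-classzone watermarks */
	for (j = 0; j < MAX_NR_ZONES; j++) {
		unsigned long mask = zones[j].realsize / 128;	/* ~zone_balance_ratio */
		unsigned long realsize = zones[j].realsize;

		if (mask < 20)
			mask = 20;
		else if (mask > 255)
			mask = 255;			/* ~zone_balance_min/max */

		zones[j].watermarks[j].min = mask;
		zones[j].watermarks[j].low = mask * 2;
		zones[j].watermarks[j].high = mask * 3;

		/* lower zones get their own watermarks plus a reserve in classzone j */
		for (idx = j - 1; idx >= 0; idx--) {
			unsigned long lower_mask = zones[idx].watermarks[idx].min;
			unsigned long reserve = realsize / lower_zone_reserve_ratio[idx];

			zones[idx].watermarks[j].min = lower_mask + reserve;
			zones[idx].watermarks[j].low = lower_mask * 2 + reserve;
			zones[idx].watermarks[j].high = lower_mask * 3 + reserve;
			realsize += zones[idx].realsize;
		}
	}

	/* allocator fast path: order-0 request whose classzone is HighMem (idx 2) */
	for (j = MAX_NR_ZONES - 1; j >= 0; j--) {
		const int class_idx = 2;
		const unsigned int order = 0;

		printf("%-8s free=%6lu  low[HighMem]=%6lu  -> %s\n",
		       zones[j].name, zones[j].free_pages,
		       zones[j].watermarks[class_idx].low,
		       zone_free_pages(&zones[j], order) >
				zones[j].watermarks[class_idx].low ?
				"can allocate" : "skip (reserved for lower classzones)");
	}
	return 0;
}

With these invented numbers the HighMem zone passes its own low watermark while Normal and DMA are skipped, which is exactly the behaviour the lower_zone_reserve_ratio boot option above is meant to tune.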
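Illustration (not part of the patch): the filemap.c change replaces the GOLDEN_RATIO_PRIME multiplicative hash with wait_table_hashfn(), which divides the struct page address by the largest power of two dividing sizeof(struct page), folds in the high bits with x + (x >> shift), and masks with the (power-of-two) table size. The sketch below is a user-space approximation of that function; the struct page layout, table size of 256 and shift of 8 are assumptions chosen only to show that consecutive page structures spread evenly over the buckets.

#include <stdio.h>

/* a toy stand-in for struct page; only its size and address matter here */
struct page { unsigned long flags; void *mapping; unsigned long index; };

static unsigned long wait_table_hash(const struct page *page,
				     unsigned long size, unsigned long shift)
{
	/* divide by the largest power of two that divides sizeof(struct page) */
	unsigned long i = (unsigned long)page /
			  (sizeof(struct page) & ~(sizeof(struct page) - 1));
	unsigned long s = i + (i >> shift);	/* fold in higher bits */

	return s & (size - 1);			/* size is a power of two */
}

int main(void)
{
	enum { SIZE = 256, SHIFT = 8, NPAGES = 4096 };
	static struct page mem_map[NPAGES];	/* stand-in for the real mem_map */
	unsigned int buckets[SIZE] = { 0 };
	unsigned int min = ~0u, max = 0;
	int i;

	for (i = 0; i < NPAGES; i++)
		buckets[wait_table_hash(&mem_map[i], SIZE, SHIFT)]++;

	for (i = 0; i < SIZE; i++) {
		if (buckets[i] < min)
			min = buckets[i];
		if (buckets[i] > max)
			max = buckets[i];
	}
	printf("%d pages over %d buckets: min %u, max %u per bucket\n",
	       NPAGES, SIZE, min, max);
	return 0;
}

The per-node wait_table_size()/wait_table_init() code above guarantees size >= 256 and shift >= 8 even on small nodes, which is why those are the values assumed in this sketch.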