Index: linux-2.6.9/include/linux/sched.h
===================================================================
--- linux-2.6.9.orig/include/linux/sched.h	2004-12-01 10:37:31.000000000 -0800
+++ linux-2.6.9/include/linux/sched.h	2004-12-01 10:38:15.000000000 -0800
@@ -537,6 +537,8 @@
 #endif
 
 	struct list_head tasks;
+	unsigned long anon_fault_next_addr;	/* Predicted sequential fault address */
+	int anon_fault_order;			/* Last order of allocation on fault */
 	/*
 	 * ptrace_list/ptrace_children forms the list of my children
 	 * that were stolen by a ptracer.
Index: linux-2.6.9/mm/memory.c
===================================================================
--- linux-2.6.9.orig/mm/memory.c	2004-12-01 10:38:11.000000000 -0800
+++ linux-2.6.9/mm/memory.c	2004-12-01 10:45:01.000000000 -0800
@@ -55,6 +55,7 @@
 
 #include <linux/swapops.h>
 #include <linux/elf.h>
+#include <linux/pagevec.h>
 
 #ifndef CONFIG_DISCONTIGMEM
 /* use the per-pgdat data instead for discontigmem - mbligh */
@@ -1432,8 +1433,106 @@
 		unsigned long addr)
 {
 	pte_t entry;
-	struct page * page = ZERO_PAGE(addr);
+	struct page * page;
+
+	addr &= PAGE_MASK;
+
+	if (current->anon_fault_next_addr == addr) {
+		unsigned long end_addr;
+		int order = current->anon_fault_order;
+
+		/* Sequential fault pattern detected: preallocate pages ahead of the fault */
 
+		/* The order of preallocations increases with each successful prediction */
+		order++;
+
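+		/* Prefault 1 << order pages, but never more than a pagevec can hold */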
+		if ((1 << order) < PAGEVEC_SIZE)
+			end_addr = addr + (1 << (order + PAGE_SHIFT));
+		else
+			end_addr = addr + PAGEVEC_SIZE * PAGE_SIZE;
+
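+		/* Stay within the vma and within the single pmd we were passed */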
+		if (end_addr > vma->vm_end)
+			end_addr = vma->vm_end;
+		if ((addr & PMD_MASK) != (end_addr & PMD_MASK))
+			end_addr &= PMD_MASK;
+
+		current->anon_fault_next_addr = end_addr;
+		current->anon_fault_order = order;
+
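+		/* Write faults get their own zeroed pages; read faults share the zero page */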
+		if (write_access) {
+
+			struct pagevec pv;
+			unsigned long a;
+			struct page **p;
+
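+			/* Drop the page table lock while allocating and zeroing pages */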
+			pte_unmap(page_table);
+			spin_unlock(&mm->page_table_lock);
+
+			pagevec_init(&pv, 0);
+
+			if (unlikely(anon_vma_prepare(vma)))
+				return VM_FAULT_OOM;
+
+			/* Allocate the necessary pages */
+			for (a = addr; a < end_addr; a += PAGE_SIZE) {
+				struct page *new_page = alloc_page_vma(GFP_HIGHUSER, vma, a);
+
+				if (new_page) {
+					clear_user_highpage(new_page, a);
+					pagevec_add(&pv, new_page);
+				} else
+					break;
+			}
+			end_addr = a;
+
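+			/* Retake the lock and map the pages, skipping ptes filled in meanwhile */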
+			spin_lock(&mm->page_table_lock);
+
+			for (p = pv.pages; addr < end_addr; addr += PAGE_SIZE, p++) {
+
+				page_table = pte_offset_map(pmd, addr);
+				if (!pte_none(*page_table)) {
+					/* Someone else got there first */
+					page_cache_release(*p);
+					pte_unmap(page_table);
+					continue;
+				}
+
+				entry = maybe_mkwrite(pte_mkdirty(mk_pte(*p,
+							 vma->vm_page_prot)),
+						      vma);
+
+				mm->rss++;
+				lru_cache_add_active(*p);
+				mark_page_accessed(*p);
+				page_add_anon_rmap(*p, vma, addr);
+
+				set_pte(page_table, entry);
+				pte_unmap(page_table);
+
+				/* No need to invalidate - it was non-present before */
+				update_mmu_cache(vma, addr, entry);
+			}
+		} else {
+			/* Read fault: map the whole range read-only to the zero page */
+			for (; addr < end_addr; addr += PAGE_SIZE) {
+				page_table = pte_offset_map(pmd, addr);
+				if (!pte_none(*page_table)) {
+					/* Already populated, leave it alone */
+					pte_unmap(page_table);
+					continue;
+				}
+				entry = pte_wrprotect(mk_pte(ZERO_PAGE(addr), vma->vm_page_prot));
+				set_pte(page_table, entry);
+				pte_unmap(page_table);
+
+				/* No need to invalidate - it was non-present before */
+				update_mmu_cache(vma, addr, entry);
+			}
+		}
+		spin_unlock(&mm->page_table_lock);
+		return VM_FAULT_MINOR;
+	}
+
+	current->anon_fault_next_addr = addr + PAGE_SIZE;
+	current->anon_fault_order = 0;
+
+	page = ZERO_PAGE(addr);
 	/* Read-only mapping of ZERO_PAGE. */
 	entry = pte_wrprotect(mk_pte(ZERO_PAGE(addr), vma->vm_page_prot));