diff -urN linux.orig/include/linux/sched.h linux/include/linux/sched.h
--- linux.orig/include/linux/sched.h	Wed May  9 13:12:44 2001
+++ linux/include/linux/sched.h	Wed May  9 14:51:17 2001
@@ -405,6 +405,9 @@
    	u32 self_exec_id;
 /* Protection of (de-)allocation: mm, files, fs, tty */
 	spinlock_t alloc_lock;
+
+	int page_reservations;	/* order-0 pages reserved via make_reservation() */
+
 };
 
 /*
diff -urN linux.orig/kernel/exit.c linux/kernel/exit.c
--- linux.orig/kernel/exit.c	Wed May  9 13:12:44 2001
+++ linux/kernel/exit.c	Wed May  9 14:51:17 2001
@@ -419,9 +419,14 @@
 	write_unlock_irq(&tasklist_lock);
 }
 
+
+extern void put_reservations(void);
+
 NORET_TYPE void do_exit(long code)
 {
 	struct task_struct *tsk = current;
+
+	put_reservations();	/* drop any page reservation we still hold */
 
 	if (in_interrupt())
 		panic("Aiee, killing interrupt handler!");
diff -urN linux.orig/kernel/fork.c linux/kernel/fork.c
--- linux.orig/kernel/fork.c	Wed May  9 13:12:45 2001
+++ linux/kernel/fork.c	Wed May  9 14:51:17 2001
@@ -630,6 +630,7 @@
 	p->tty_old_pgrp = 0;
 	p->times.tms_utime = p->times.tms_stime = 0;
 	p->times.tms_cutime = p->times.tms_cstime = 0;
+	p->page_reservations = 0;
 #ifdef CONFIG_SMP
 	{
 		int i;
diff -urN linux.orig/mm/Makefile linux/mm/Makefile
--- linux.orig/mm/Makefile	Wed May  9 13:12:43 2001
+++ linux/mm/Makefile	Wed May  9 14:51:17 2001
@@ -14,7 +14,7 @@
 obj-y	 := memory.o mmap.o filemap.o mprotect.o mlock.o mremap.o \
 	    vmalloc.o slab.o bootmem.o swap.o vmscan.o page_io.o \
 	    page_alloc.o swap_state.o swapfile.o numa.o oom_kill.o \
-	    shmem.o
+	    shmem.o reserve.o
 
 obj-$(CONFIG_HIGHMEM) += highmem.o
 
diff -urN linux.orig/mm/page_alloc.c linux/mm/page_alloc.c
--- linux.orig/mm/page_alloc.c	Wed May  9 13:12:46 2001
+++ linux/mm/page_alloc.c	Wed May  9 14:54:30 2001
@@ -61,6 +61,11 @@
  *
  * Hint: -mask = 1+~mask
  */
+
+extern int nr_reserved_pages;		/* pages in the reserved_pages list */
+extern int nr_reservations;		/* pages promised via make_reservation() */
+extern spinlock_t reservation_lock;
+extern struct list_head reserved_pages;
 
 static void FASTCALL(__free_pages_ok (struct page *page, unsigned long order));
 static void __free_pages_ok (struct page *page, unsigned long order)
@@ -99,6 +104,27 @@
 
 	page->flags &= ~((1<<PG_referenced) | (1<<PG_dirty));
 	page->age = PAGE_AGE_START;
+
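+	/*
+	 * Top up the reservation pool with freed order-0 pages.  Highmem
+	 * pages are skipped: reserved pages are handed out without
+	 * consulting the zonelist, so they must be usable everywhere.
+	 * Pool pages keep a count of 1, like pages from alloc_page().
+	 * The threshold 16 must match 2*VMRESERVE in mm/reserve.c, or
+	 * free_redundant_reservations() would recapture every page it
+	 * frees.  irqsave, as pages may be freed with interrupts disabled.
+	 */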
+	if (order == 0 && !PageHighMem(page) &&
+	    (nr_reserved_pages - nr_reservations) < 16) {
+		unsigned long flags;
+
+		set_page_count(page, 1);
+		spin_lock_irqsave(&reservation_lock, flags);
+		list_add(&page->list, &reserved_pages);
+		nr_reserved_pages++;
+		spin_unlock_irqrestore(&reservation_lock, flags);
+		return;
+	}
 	
 	zone = page->zone;
 
@@ -304,6 +330,7 @@
 	return NULL;
 }
 
+extern struct page *get_reserved_page(void);
 
 /*
  * This is the 'heart' of the zoned buddy allocator:
@@ -320,6 +347,13 @@
 	 * Allocations put pressure on the VM subsystem.
 	 */
 	memory_pressure++;
+
+	/* order-0 allocations by a reservation holder come from the pool */
+	if (order == 0 && !in_interrupt()) {
+		page = get_reserved_page();
+		if (page)
+			return page;
+	}
 
 	/*
 	 * (If anyone calls gfp from interrupts nonatomically then it
diff -urN linux.orig/mm/reserve.c linux/mm/reserve.c
--- linux.orig/mm/reserve.c	Wed Dec 31 19:00:00 1969
+++ linux/mm/reserve.c	Wed May  9 14:51:17 2001
@@ -0,0 +1,184 @@
+/*
+ * Memory reservations
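+ *
+ * A task calls make_reservation(n) to guarantee that its next n
+ * order-0 allocations succeed even under memory pressure; while the
+ * reservation is held, alloc_pages() satisfies them from a pool of
+ * reserved pages.  Typical use, as in page_launder():
+ *
+ *	make_reservation(4);
+ *	writepage(page);
+ *	put_reservations();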
+ */
+#include <linux/mm.h>
+#include <linux/pagemap.h>
+#include <linux/swap.h>
+#include <linux/slab.h>
+
+spinlock_t reservation_lock = SPIN_LOCK_UNLOCKED;
+
+#define VMRESERVE 8	/* slack kept free for PF_MEMALLOC tasks */
+
+int nr_reserved_pages;	/* pages currently in the reserved_pages list */
+int nr_reservations;	/* pages promised to tasks via make_reservation() */
+LIST_HEAD(reserved_pages);
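+
+/*
+ * nr_reservations never exceeds nr_reserved_pages; the difference is
+ * the pool slack that make_reservation() hands out.
+ */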
+
+/* may only be called by a task that holds a reservation */
+struct page *__get_reserved_page(void)
+{
+	struct list_head *tmp;
+	struct page *page = NULL;
+
+	tmp = &reserved_pages;
+	spin_lock_irq(&reservation_lock);
+	if (!list_empty(tmp)) {
+		page = list_entry(tmp->next, struct page, list);
+		list_del(tmp->next);
+		if (nr_reserved_pages>0)
+			nr_reserved_pages--;
+		if (nr_reservations>0)
+			nr_reservations--;
+		if (current->page_reservations>0)
+			current->page_reservations--;
+	}
+	spin_unlock_irq(&reservation_lock);
+
+	if (page == NULL)
+		printk("mm: out of reserved pages\n"); /* if this happens, someone is cheating */
+
+	return page;
+}
+
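+/*
+ * Take a page from the pool without consuming a task reservation;
+ * only used by free_redundant_reservations() to trim the pool.
+ */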
+struct page *__get_reserved_page2(void)
+{
+	struct list_head *tmp;
+	struct page *page = NULL;
+
+	tmp = &reserved_pages;
+	spin_lock_irq(&reservation_lock);
+	if (!list_empty(tmp)) {
+		page = list_entry(tmp->next, struct page, list);
+		list_del(tmp->next);
+		if (nr_reserved_pages>0)
+			nr_reserved_pages--;
+	}
+	spin_unlock_irq(&reservation_lock);
+	return page;
+}
+
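+/*
+ * Fetch a reserved page for the current task, or NULL when the task
+ * holds no reservation and must fall back to the buddy lists.
+ */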
+struct page *get_reserved_page(void)
+{
+	if (in_interrupt())
+		return NULL;
+
+	if (!current->page_reservations)
+		return NULL;
+
+	return __get_reserved_page();
+}
+
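+/*
+ * Try to grow the pool by one page; fails silently when the
+ * allocation fails.
+ */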
+static void add_page_pool(unsigned long gfp_mask)
+{
+	struct page *page;
+
+	page = alloc_page(gfp_mask);
+	if (page == NULL)
+		return;
+	spin_lock_irq(&reservation_lock);
+	list_add(&page->list, &reserved_pages);
+	nr_reserved_pages++;
+	spin_unlock_irq(&reservation_lock);
+}
+
+/* reserve "pages" pages for the current task; sleeps until they are available */
+void make_reservation(int pages)
+{
+	int i, vmreserve;
+
+	/* first: give up any existing reservations */
+	put_reservations();
+	
+	/* step one: try to soft-allocate "pages" to the pool */
+	for (i = 0; i < pages; i++)
+		add_page_pool(GFP_ATOMIC | __GFP_FAIL);
+
+	/* if we are kswapd / bdflush, we can use the emergency reserve */
+	vmreserve = VMRESERVE;
+	if (current->flags & PF_MEMALLOC)
+		vmreserve = 0;
+
+	while (1) {
+		spin_lock_irq(&reservation_lock);
+		if (nr_reserved_pages - nr_reservations - vmreserve > pages) {
+			nr_reservations += pages;
+			current->page_reservations = pages;
+			goto out;
+		}
+		spin_unlock_irq(&reservation_lock);
+		/* not enough spare pages: grow the pool, sleeping if needed */
+		if (current->flags & PF_MEMALLOC)
+			printk("mm: short on reservations\n");
+		add_page_pool(GFP_KERNEL);
+	}
+
+out:
+	spin_unlock_irq(&reservation_lock);
+}
+
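+/*
+ * Give back the current task's reservation.  The pages stay in the
+ * pool until free_redundant_reservations() trims the excess.
+ */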
+void put_reservations(void)
+{
+	if (current->page_reservations) {
+		int i;
+		i = xchg(&current->page_reservations, 0);
+		spin_lock_irq(&reservation_lock);
+		nr_reservations -= i;
+		spin_unlock_irq(&reservation_lock);
+	}
+}
+
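+/*
+ * Shrink the pool when far more pages are pooled than reserved;
+ * called from do_try_to_free_pages().
+ */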
+void free_redundant_reservations(void)
+{
+	struct page *page;
+	spin_lock_irq(&reservation_lock);
+	while (nr_reserved_pages - nr_reservations > 2*VMRESERVE) {
+		spin_unlock_irq(&reservation_lock);
+		page = __get_reserved_page2();
+		if (page != NULL) {
+			__free_page(page);
+		} else {
+			/* counters out of sync with the list; resync under the lock */
+			spin_lock_irq(&reservation_lock);
+			nr_reserved_pages = nr_reservations;
+			spin_unlock_irq(&reservation_lock);
+			printk("mm: reserved page leak\n");
+			return;
+		}
+		spin_lock_irq(&reservation_lock);
+	}
+	spin_unlock_irq(&reservation_lock);
+}
diff -urN linux.orig/mm/vmscan.c linux/mm/vmscan.c
--- linux.orig/mm/vmscan.c	Wed May  9 13:12:46 2001
+++ linux/mm/vmscan.c	Wed May  9 14:51:17 2001
@@ -422,6 +422,10 @@
 	return page;
 }
 
+
+extern void make_reservation(int pages);
+extern void put_reservations(void);
+
 /**
  * page_launder - clean dirty inactive pages, move to inactive_clean list
  * @gfp_mask: what operations we are allowed to do
@@ -541,8 +545,16 @@
 			ClearPageDirty(page);
 			page_cache_get(page);
 			spin_unlock(&pagemap_lru_lock);
-
-			writepage(page);
+
+			/*
+			 * Reserve a few pages up front so ->writepage()
+			 * cannot deadlock on memory allocations while we
+			 * are trying to free memory.
+			 */
+			make_reservation(4);
+			writepage(page);
+			put_reservations();
+
 			/* XXX: all ->writepage()s should use nr_async_pages */
 			if (!PageSwapCache(page))
 				flushed_pages++;
@@ -944,10 +956,14 @@
 	return (count < start_count);
 }
 
+extern void free_redundant_reservations(void);
+
 static int do_try_to_free_pages(unsigned int gfp_mask, int user)
 {
 	int ret = 0;
 
+	free_redundant_reservations();
+
 	/*
 	 * If we're low on free pages, move pages from the
 	 * inactive_dirty list to the inactive_clean list.
