diff -urN v2.4.19-pre5/AIO-NOTES linux.diff/AIO-NOTES
--- v2.4.19-pre5/AIO-NOTES	Wed Dec 31 19:00:00 1969
+++ linux.diff/AIO-NOTES	Wed May  8 14:02:13 2002
@@ -0,0 +1,3 @@
+- aio context destruction is now synchronous: it waits for all pending 
+  ios to complete.  This will now cause a task that is exiting to be 
+  delayed if outstanding ios are executing.
diff -urN v2.4.19-pre5/Documentation/Configure.help linux.diff/Documentation/Configure.help
--- v2.4.19-pre5/Documentation/Configure.help	Wed Apr  3 21:04:25 2002
+++ linux.diff/Documentation/Configure.help	Tue Apr 30 17:29:31 2002
@@ -17983,6 +17983,26 @@
   contains more information and the location of the joystick package
   that you'll need.
 
+/dev/epoll support
+CONFIG_EVENTPOLL
+  This option will allow for the creation of a '/dev/epoll' character
+  device, with major number 10 (MISC_MAJOR) and minor number 124
+  (EVENTPOLL_MINOR).
+
+  This device can be used to very efficiently handle incoming events on a
+  socket, much more so than select() or poll(). There is a paper that
+  describes this device and how to program for it (as well as including
+  some very impressive benchmarks) at the following URL:
+  http://www.xmailserver.org/linux-patches/nio-improve.html
+
+  If you are writing very scalable servers and wish to code against
+  /dev/epoll for enhanced speed, say 'Y' or 'M' here. If you have
+  software in hand that requires (or can make use of) /dev/epoll,
+  also say 'Y' or 'M' here.
+
+  The vast majority of the planet can very safely say 'N' here
+  and breathe easily.
+
 Game port support
 CONFIG_INPUT_GAMEPORT
   Gameport support is for the standard 15-pin PC gameport.  If you
diff -urN v2.4.19-pre5/MAINTAINERS linux.diff/MAINTAINERS
--- v2.4.19-pre5/MAINTAINERS	Wed Apr  3 21:04:25 2002
+++ linux.diff/MAINTAINERS	Tue Apr  2 18:56:58 2002
@@ -228,6 +228,12 @@
 L:	linux-net@vger.kernel.org
 S:	Maintained
 
+ASYNC IO
+P:	Benjamin LaHaise
+M:	bcrl@redhat.com
+L:	linux-aio@kvack.org
+S:	Maintained
+
 AX.25 NETWORK LAYER
 P:	Matthias Welwarsky
 M:	dg2fef@afthd.tu-darmstadt.de
diff -urN v2.4.19-pre5/Makefile linux.diff/Makefile
--- v2.4.19-pre5/Makefile	Wed Apr  3 21:04:25 2002
+++ linux.diff/Makefile	Fri Apr 19 20:57:16 2002
@@ -226,7 +226,7 @@
 	drivers/sound/pndsperm.c \
 	drivers/sound/pndspini.c \
 	drivers/atm/fore200e_*_fw.c drivers/atm/.fore200e_*.fw \
-	.version .config* config.in config.old \
+	.uniquebytes .version .config* config.in config.old \
 	scripts/tkparse scripts/kconfig.tk scripts/kconfig.tmp \
 	scripts/lxdialog/*.o scripts/lxdialog/lxdialog \
 	.menuconfig.log \
@@ -268,6 +268,7 @@
 		--end-group \
 		-o vmlinux
 	$(NM) vmlinux | grep -v '\(compiled\)\|\(\.o$$\)\|\( [aUw] \)\|\(\.\.ng$$\)\|\(LASH[RL]DI\)' | sort > System.map
+	@$(MAKE) -C ulib
 
 symlinks:
 	rm -f include/asm
@@ -296,7 +297,7 @@
 
 linuxsubdirs: $(patsubst %, _dir_%, $(SUBDIRS))
 
-$(patsubst %, _dir_%, $(SUBDIRS)) : dummy include/linux/version.h include/config/MARKER
+$(patsubst %, _dir_%, $(SUBDIRS)) : dummy include/linux/compile.h include/config/MARKER
 	$(MAKE) CFLAGS="$(CFLAGS) $(CFLAGS_KERNEL)" -C $(patsubst _dir_%, %, $@)
 
 $(TOPDIR)/include/linux/version.h: include/linux/version.h
@@ -322,6 +323,11 @@
 	   echo \#define LINUX_COMPILE_DOMAIN ; \
 	 fi >> .ver
 	@echo \#define LINUX_COMPILER \"`$(CC) $(CFLAGS) -v 2>&1 | tail -1`\" >> .ver
+	@rm -f .uniquebytes
+	@dd if=/dev/urandom of=.uniquebytes bs=1 count=16
+	@echo -n \#"define LINUX_UNIQUE_BYTES " >>.ver
+	@hexdump -v -e '1/1 "0x%02x, "' .uniquebytes | sed -e 's/, $$//g' >>.ver
+	@echo "" >>.ver
 	@mv -f .ver $@
 
 include/linux/version.h: ./Makefile
@@ -404,6 +410,8 @@
 .PHONY: $(patsubst %, _modinst_%, $(SUBDIRS))
 $(patsubst %, _modinst_%, $(SUBDIRS)) :
 	$(MAKE) -C $(patsubst _modinst_%, %, $@) modules_install
+	mkdir -p  $(INSTALL_MOD_PATH)/lib/kernel/$(KERNELRELEASE)/
+	install -m 755 ulib/libredhat-kernel.so.1.0.1 $(INSTALL_MOD_PATH)/lib/kernel/$(KERNELRELEASE)/
 
 # modules disabled....
 
@@ -423,6 +431,7 @@
 	rm -f $(CLEAN_FILES)
 	rm -rf $(CLEAN_DIRS)
 	$(MAKE) -C Documentation/DocBook clean
+	$(MAKE) -C ulib clean
 
 mrproper: clean archmrproper
 	find . \( -size 0 -o -name .depend \) -type f -print | xargs rm -f
diff -urN v2.4.19-pre5/arch/i386/Makefile linux.diff/arch/i386/Makefile
--- v2.4.19-pre5/arch/i386/Makefile	Thu May  3 11:22:07 2001
+++ linux.diff/arch/i386/Makefile	Tue Apr  2 18:56:58 2002
@@ -98,7 +98,7 @@
 DRIVERS += arch/i386/math-emu/math.o
 endif
 
-arch/i386/kernel: dummy
+arch/i386/kernel: dummy include/linux/compile.h
 	$(MAKE) linuxsubdirs SUBDIRS=arch/i386/kernel
 
 arch/i386/mm: dummy
diff -urN v2.4.19-pre5/arch/i386/kernel/Makefile linux.diff/arch/i386/kernel/Makefile
--- v2.4.19-pre5/arch/i386/kernel/Makefile	Mon Nov 26 23:43:07 2001
+++ linux.diff/arch/i386/kernel/Makefile	Tue Apr  2 18:56:58 2002
@@ -30,6 +30,10 @@
 endif
 endif
 
+obj-y += vsysdata.o vunique.o dynamic_syscall.o
+
+vunique.o: $(TOPDIR)/include/linux/compile.h
+
 obj-$(CONFIG_MCA)		+= mca.o
 obj-$(CONFIG_MTRR)		+= mtrr.o
 obj-$(CONFIG_X86_MSR)		+= msr.o
diff -urN v2.4.19-pre5/arch/i386/kernel/dynamic_syscall.c linux.diff/arch/i386/kernel/dynamic_syscall.c
--- v2.4.19-pre5/arch/i386/kernel/dynamic_syscall.c	Wed Dec 31 19:00:00 1969
+++ linux.diff/arch/i386/kernel/dynamic_syscall.c	Tue Apr  2 18:56:58 2002
@@ -0,0 +1,90 @@
+/* arch/i386/kernel/dynamic_syscall.c
+ *	Entry code for dynamic syscalls on i386.
+ */
+#include <linux/kernel.h>
+#include <linux/compiler.h>
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <asm/ptrace.h>
+#include <asm/uaccess.h>
+
+struct dummy_args {
+	long data[8];
+};
+
+extern struct vsyscall_entry {
+	long eip;
+	long (*call)(struct dummy_args args);
+} vsyscall_list_begin, vsyscall_list_end;
+
+long sys_dynamic_syscall(struct pt_regs regs) __attribute__((regparm(0)));
+
+long sys_dynamic_syscall(struct pt_regs regs)
+{
+	struct dummy_args dummy_args;
+	struct vsyscall_entry *ent = (void *)regs.edx;
+	void *args = (void *)regs.ecx;
+	long ret;
+
+	pr_debug("ent = %p  args = %p\n", ent, args);
+	pr_debug("eip = 0x%08lx\n", regs.eip);
+
+	if (unlikely(!current->mm->vsys_mapped))
+		goto err;
+
+	/* The pointer must be aligned in the table. */
+	if (unlikely((long)ent & (sizeof(*ent) - 1))) {
+		pr_debug("unaligned\n");
+		goto err;
+	}
+
+	/* Bounds checking... */
+	if (unlikely(ent < &vsyscall_list_begin) ||
+	    unlikely(ent >= &vsyscall_list_end)) {
+		pr_debug("out of range %p <= %p < %p\n", 
+			&vsyscall_list_begin, ent,
+			&vsyscall_list_end);
+		goto err;
+	}
+	/* The entry should be valid now.  Verify that the caller's eip 
+	 * is correct.
+	 */
+	if (unlikely(ent->eip != regs.eip)) {
+		pr_debug("eip mismatch (0x%lx vs 0x%lx)\n", ent->eip, regs.eip);
+		goto err;
+	}
+
+	pr_debug("ent->call = %p\n", ent->call);
+
+	if (unlikely(verify_area(VERIFY_READ, args, sizeof(dummy_args))))
+		return -EFAULT;
+	
+	__asm__ volatile (
+	"	cld			\n"
+	"	sub $0x20, %%esp		\n"
+	"	movl %%esp, %%edi		\n"
+	"	movl $0x8, %%ecx		\n"
+	"1:	rep movsl		\n"
+	"	call %%edx		\n"
+	"2:	add $0x20, %%esp		\n"
+
+/* the exception handling: just return -EFAULT */
+	".section .fixup, \"ax\"	\n"
+	"3:	movl $0xfffffff2, %%eax	\n"  /* -EFAULT */
+	"	jmp 2b			\n"
+	".previous			\n"
+	".section __ex_table,\"a\"	\n"
+	"       .align 4		\n"
+	"       .long 1b, 3b		\n"
+	".previous			\n"
+
+	: "=a" (ret)
+	: "S" (args), "d" (ent->call)
+	: "%edi", "%ecx" );
+
+	pr_debug("ret = 0x%08lx\n", ret);
+
+	return ret;
+err:
+	return -ENOSYS;
+}
diff -urN v2.4.19-pre5/arch/i386/kernel/entry.S linux.diff/arch/i386/kernel/entry.S
--- v2.4.19-pre5/arch/i386/kernel/entry.S	Wed Apr  3 21:04:26 2002
+++ linux.diff/arch/i386/kernel/entry.S	Tue Apr  2 18:56:58 2002
@@ -45,6 +45,7 @@
 #include <linux/linkage.h>
 #include <asm/segment.h>
 #include <asm/smp.h>
+#include <asm/unistd.h>
 
 EBX		= 0x00
 ECX		= 0x04
@@ -636,6 +637,11 @@
 	.long SYMBOL_NAME(sys_ni_syscall)	/* reserved for fremovexattr */
  	.long SYMBOL_NAME(sys_tkill)
 
+ 	.rept __NR_sys_dynamic_syscall-(.-sys_call_table)/4
+ 		.long SYMBOL_NAME(sys_ni_syscall)
+ 	.endr
+ 	.long SYMBOL_NAME(sys_dynamic_syscall)
+
 	.rept NR_syscalls-(.-sys_call_table)/4
 		.long SYMBOL_NAME(sys_ni_syscall)
 	.endr
diff -urN v2.4.19-pre5/arch/i386/kernel/irq.c linux.diff/arch/i386/kernel/irq.c
--- v2.4.19-pre5/arch/i386/kernel/irq.c	Mon Nov 12 17:49:47 2001
+++ linux.diff/arch/i386/kernel/irq.c	Fri Apr  5 18:28:20 2002
@@ -577,7 +577,17 @@
 	irq_desc_t *desc = irq_desc + irq;
 	struct irqaction * action;
 	unsigned int status;
+	long esp;
 
+	/* Debugging check for stack overflow: is there less than 2KB free? */
+	__asm__ __volatile__("andl %%esp,%0" : "=r" (esp) : "0" (8191));
+	if (esp < (sizeof(struct task_struct) + 2048)) {
+		printk("do_IRQ: stack overflow: %ld\n",
+			esp - sizeof(struct task_struct));
+		__asm__ __volatile__("movl %%esp,%0" : "=r" (esp));
+		show_stack((void *)esp);
+	}
+	
 	kstat.irqs[cpu][irq]++;
 	spin_lock(&desc->lock);
 	desc->handler->ack(irq);
diff -urN v2.4.19-pre5/arch/i386/kernel/vsysdata.c linux.diff/arch/i386/kernel/vsysdata.c
--- v2.4.19-pre5/arch/i386/kernel/vsysdata.c	Wed Dec 31 19:00:00 1969
+++ linux.diff/arch/i386/kernel/vsysdata.c	Tue Apr  2 18:56:58 2002
@@ -0,0 +1,11 @@
+/* vsysdata.c	- declarations for variables shared with the kernel
+ *
+ *	Items placed in .data.vsyscall have a kernel virtual address 
+ *	and are read/write from kernel space only.  The copy placed 
+ *	in .vsyscall_data are linked at a userspace address and are 
+ *	read only accessible from userland.
+ */
+#include <linux/vsyscall.h>
+
+union vsys_union user_vsys_cpudata[256] __attribute__((section(".vsyscall_data")));
+//asm(".globl vsys_cpudata ; bobbob = user_vsys_cpudata - vsyscall_text_begin ; vsys_cpudata = bobbob + VSYSCALL_text");
diff -urN v2.4.19-pre5/arch/i386/kernel/vunique.S linux.diff/arch/i386/kernel/vunique.S
--- v2.4.19-pre5/arch/i386/kernel/vunique.S	Wed Dec 31 19:00:00 1969
+++ linux.diff/arch/i386/kernel/vunique.S	Tue Apr  2 18:56:58 2002
@@ -0,0 +1,7 @@
+#include <linux/compile.h>
+
+	.section .first_vsyscall_text,"xa"
+	.globl	signature
+signature:
+	.byte	LINUX_UNIQUE_BYTES
+	.size	signature,.-signature
diff -urN v2.4.19-pre5/arch/i386/mm/fault.c linux.diff/arch/i386/mm/fault.c
--- v2.4.19-pre5/arch/i386/mm/fault.c	Wed Apr  3 21:04:26 2002
+++ linux.diff/arch/i386/mm/fault.c	Tue Apr  9 18:03:05 2002
@@ -27,6 +27,8 @@
 
 extern void die(const char *,struct pt_regs *,long);
 
+spinlock_t oops_lock = SPIN_LOCK_UNLOCKED;
+
 /*
  * Ugly, ugly, but the goto's result in better assembly..
  */
@@ -306,7 +308,7 @@
  * Oops. The kernel tried to access some bad page. We'll have to
  * terminate things with extreme prejudice.
  */
-
+	spin_lock(&oops_lock);
 	bust_spinlocks(1);
 
 	if (address < PAGE_SIZE)
@@ -327,6 +329,7 @@
 	}
 	die("Oops", regs, error_code);
 	bust_spinlocks(0);
+	spin_unlock(&oops_lock);
 	do_exit(SIGKILL);
 
 /*
diff -urN v2.4.19-pre5/arch/i386/vmlinux.lds linux.diff/arch/i386/vmlinux.lds
--- v2.4.19-pre5/arch/i386/vmlinux.lds	Thu Mar  7 16:39:56 2002
+++ linux.diff/arch/i386/vmlinux.lds	Tue Apr  2 18:56:58 2002
@@ -14,6 +14,27 @@
 	*(.gnu.warning)
 	} = 0x9090
 
+	/* Note: most of these declarations are in kernel/vsysdata.c,vsyscall.S.
+	 * We use two segments for the data linked at a kernel virtual address 
+	 * (.data.vsyscall) and user virtual address (.vsyscall_data).
+	 * .vsyscall_text is linked at a kernel virtual address
+	 */
+	. = ALIGN(4096);
+	VSYSCALL_text = .;
+	VSYSCALL 0xbfff0000 : AT ( VSYSCALL_text ) {
+		vsyscall_text_begin = .;
+		*(.first_vsyscall_text)
+		*(.vsyscall_text)
+		. = ALIGN(4096);
+		vsyscall_text_end = .;
+		*(.vsyscall_data)
+		. = ALIGN(4096);
+		vsyscall_data_end = .;
+	}
+	vsys_cpudata = user_vsys_cpudata - vsyscall_text_begin + VSYSCALL_text;
+	. = VSYSCALL_text + SIZEOF(VSYSCALL);
+	VSYSCALL_text_end = .;
+
   _etext = .;			/* End of text section */
 
   .rodata : { *(.rodata) *(.rodata.*) }
@@ -30,6 +51,17 @@
 
   .data : {			/* Data */
 	*(.data)
+
+	. = ALIGN(8);
+	vsyscall_list_begin = .;
+	*(.data.vsyscall_list)
+	vsyscall_list_end = .;
+	. = ALIGN(4096);
+	kernel_vsyscall_data_begin = .;
+	*(.data.vsyscall)
+	. = ALIGN(4096);
+	kernel_vsyscall_data_end = .;
+
 	CONSTRUCTORS
 	}
 
@@ -79,4 +111,15 @@
   .stab.index 0 : { *(.stab.index) }
   .stab.indexstr 0 : { *(.stab.indexstr) }
   .comment 0 : { *(.comment) }
+/*
+#  VSYSCALL : {
+#	/ * vsyscall area *i /
+#	__vsyscall_begin = .;
+#	*(vsyscall_text)
+#	. = ALIGN(4096);
+#	*(.data.vsyscall)
+#	. = ALIGN(4096);
+#	__vsyscall_end = .;
+#  } >vsyscall_area
+*/
 }
diff -urN v2.4.19-pre5/drivers/block/loop.c linux.diff/drivers/block/loop.c
--- v2.4.19-pre5/drivers/block/loop.c	Wed Apr  3 21:04:30 2002
+++ linux.diff/drivers/block/loop.c	Tue Apr  2 18:56:57 2002
@@ -283,7 +283,7 @@
 	spin_lock_irq(&lo->lo_lock);
 	file = lo->lo_backing_file;
 	spin_unlock_irq(&lo->lo_lock);
-	do_generic_file_read(file, &pos, &desc, lo_read_actor);
+	do_generic_file_read(file, &pos, &desc, lo_read_actor, 0);
 	return desc.error;
 }
 
diff -urN v2.4.19-pre5/drivers/char/Config.in linux.diff/drivers/char/Config.in
--- v2.4.19-pre5/drivers/char/Config.in	Wed Apr  3 21:04:30 2002
+++ linux.diff/drivers/char/Config.in	Tue Apr 30 17:29:31 2002
@@ -220,6 +220,7 @@
 dep_tristate 'AMD 768 Random Number Generator support' CONFIG_AMD_RNG $CONFIG_PCI
 dep_tristate 'Intel i8x0 Random Number Generator support' CONFIG_INTEL_RNG $CONFIG_PCI
 tristate '/dev/nvram support' CONFIG_NVRAM
+tristate '/dev/epoll - Efficient file event polling method' CONFIG_EVENTPOLL
 tristate 'Enhanced Real Time Clock Support' CONFIG_RTC
 if [ "$CONFIG_IA64" = "y" ]; then
    bool 'EFI Real Time Clock Services' CONFIG_EFI_RTC
diff -urN v2.4.19-pre5/drivers/char/Makefile linux.diff/drivers/char/Makefile
--- v2.4.19-pre5/drivers/char/Makefile	Wed Apr  3 21:04:30 2002
+++ linux.diff/drivers/char/Makefile	Tue Apr 30 17:29:31 2002
@@ -208,6 +208,7 @@
 ifeq ($(CONFIG_PPC),)
   obj-$(CONFIG_NVRAM) += nvram.o
 endif
+obj-$(CONFIG_EVENTPOLL) += eventpoll.o
 obj-$(CONFIG_TOSHIBA) += toshiba.o
 obj-$(CONFIG_I8K) += i8k.o
 obj-$(CONFIG_DS1620) += ds1620.o
diff -urN v2.4.19-pre5/drivers/char/eventpoll.c linux.diff/drivers/char/eventpoll.c
--- v2.4.19-pre5/drivers/char/eventpoll.c	Wed Dec 31 19:00:00 1969
+++ linux.diff/drivers/char/eventpoll.c	Tue Apr 30 17:29:31 2002
@@ -0,0 +1,800 @@
+/*
+ *  drivers/char/eventpoll.c
+ *
+ *  Copyright (C) 2001, Davide Libenzi <davidel@xmailserver.org>
+ *
+ *  Efficient event polling implementation
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/fs.h>
+#include <linux/file.h>
+#include <linux/signal.h>
+#include <linux/errno.h>
+#include <linux/mm.h>
+#include <linux/vmalloc.h>
+#include <linux/slab.h>
+#include <linux/poll.h>
+#include <linux/miscdevice.h>
+#include <linux/random.h>
+#include <linux/smp_lock.h>
+#include <linux/wrapper.h>
+#include <linux/string.h>
+#include <linux/list.h>
+#include <linux/spinlock.h>
+#include <linux/wait.h>
+#include <linux/fcblist.h>
+#include <asm/bitops.h>
+#include <asm/uaccess.h>
+#include <asm/system.h>
+#include <asm/io.h>
+#include <asm/atomic.h>
+
+#include <linux/eventpoll.h>
+
+
+
+
+
+#define DEBUG	0
+#ifdef DEBUG
+#define DPRINTK(x)	printk x
+#define DNPRINTK(n,x)	if (n <= DEBUG) printk x
+#else
+#define DPRINTK(x)
+#define DNPRINTK(n,x)
+#endif
+
+#define DEBUG_DPI	0
+
+#if DEBUG_DPI
+#define DPI_SLAB_DEBUG	(SLAB_DEBUG_FREE | SLAB_RED_ZONE /* | SLAB_POISON */)
+#else
+#define DPI_SLAB_DEBUG	0
+#endif
+
+#define INITIAL_HASH_BITS	7
+#define MAX_HASH_BITS	18
+#define RESIZE_LENGTH	2
+
+#define dpi_mem_alloc()	(struct epitem *) kmem_cache_alloc(dpi_cache, SLAB_KERNEL)
+#define dpi_mem_free(p) kmem_cache_free(dpi_cache, p)
+
+
+
+
+
+typedef unsigned long long event_version_t;
+
+struct eventpoll {
+	rwlock_t lock;
+	wait_queue_head_t wq;
+	wait_queue_head_t poll_wait;
+	struct list_head *hash;
+	unsigned int hbits;
+	unsigned int hmask;
+	atomic_t hents;
+	atomic_t resize;
+	int numpages;
+	char **pages;
+	char *pages0[MAX_EVENTPOLL_PAGES];
+	char *pages1[MAX_EVENTPOLL_PAGES];
+	atomic_t mmapped;
+	int eventcnt;
+	event_version_t ver;
+};
+
+struct epitem {
+	struct list_head llink;
+	struct eventpoll *ep;
+	struct file *file;
+	struct pollfd pfd;
+	int index;
+	event_version_t ver;
+};
+
+
+
+
+
+
+static int ep_alloc_pages(char **pages, int numpages);
+static int ep_free_pages(char **pages, int numpages);
+static int ep_init(struct eventpoll *ep);
+static void ep_free(struct eventpoll *ep);
+static inline struct epitem *ep_find_nl(struct eventpoll *ep, int fd);
+static struct epitem *ep_find(struct eventpoll *ep, int fd);
+static int ep_hashresize(struct eventpoll *ep, unsigned long *kflags);
+static int ep_insert(struct eventpoll *ep, struct pollfd *pfd);
+static int ep_remove(struct eventpoll *ep, struct epitem *dpi);
+static void notify_proc(struct file *file, void *data, unsigned long *local, long *event);
+static int open_eventpoll(struct inode *inode, struct file *file);
+static int close_eventpoll(struct inode *inode, struct file *file);
+static unsigned int poll_eventpoll(struct file *file, poll_table *wait);
+static int write_eventpoll(struct file *file, const char *buffer, size_t count,
+		loff_t *ppos);
+static int ep_poll(struct eventpoll *ep, void *arg);
+static int ioctl_eventpoll(struct inode *inode, struct file *file,
+		unsigned int cmd, unsigned long arg);
+static void eventpoll_mm_open(struct vm_area_struct * vma);
+static void eventpoll_mm_close(struct vm_area_struct * vma);
+static int mmap_eventpoll(struct file *file, struct vm_area_struct *vma);
+
+
+
+
+static kmem_cache_t *dpi_cache;
+
+static struct file_operations eventpoll_fops = {
+	write: write_eventpoll,
+	ioctl: ioctl_eventpoll,
+	mmap: mmap_eventpoll,
+	open: open_eventpoll,
+	release: close_eventpoll,
+	poll: poll_eventpoll
+};
+
+static struct vm_operations_struct eventpoll_mmap_ops = {
+	open: eventpoll_mm_open,
+	close: eventpoll_mm_close,
+};
+
+static struct miscdevice eventpoll = {
+	EVENTPOLL_MINOR, "eventpoll", &eventpoll_fops
+};
+
+
+
+
+static int ep_alloc_pages(char **pages, int numpages)
+{
+	int ii;
+
+	for (ii = 0; ii < numpages; ii++) {
+		pages[ii] = (char *) __get_free_pages(GFP_KERNEL, 0);
+		if (!pages[ii]) {
+			for (--ii; ii >= 0; ii--) {
+				clear_bit(PG_reserved, &virt_to_page(pages[ii])->flags);
+				free_pages((unsigned long) pages[ii], 0);
+			}
+			return -ENOMEM;
+		}
+		set_bit(PG_reserved, &virt_to_page(pages[ii])->flags);
+	}
+	return 0;
+}
+
+
+static int ep_free_pages(char **pages, int numpages)
+{
+	int ii;
+
+	for (ii = 0; ii < numpages; ii++) {
+		clear_bit(PG_reserved, &virt_to_page(pages[ii])->flags);
+		free_pages((unsigned long) pages[ii], 0);
+	}
+	return 0;
+}
+
+
+static int ep_init(struct eventpoll *ep)
+{
+	int ii, hentries;
+
+	rwlock_init(&ep->lock);
+	init_waitqueue_head(&ep->wq);
+	init_waitqueue_head(&ep->poll_wait);
+	ep->hbits = INITIAL_HASH_BITS;
+	ep->hmask = (1 << ep->hbits) - 1;
+	atomic_set(&ep->hents, 0);
+	atomic_set(&ep->resize, 0);
+	atomic_set(&ep->mmapped, 0);
+	ep->numpages = 0;
+	ep->pages = ep->pages0;
+	ep->eventcnt = 0;
+	ep->ver = 1;
+
+	hentries = ep->hmask + 1;
+	if (!(ep->hash = (struct list_head *) vmalloc(hentries * sizeof(struct list_head))))
+		return -ENOMEM;
+
+	for (ii = 0; ii < hentries; ii++)
+		INIT_LIST_HEAD(&ep->hash[ii]);
+
+	return 0;
+}
+
+
+static void ep_free(struct eventpoll *ep)
+{
+	int ii;
+	struct list_head *lnk;
+
+	lock_kernel();
+	for (ii = 0; ii <= ep->hmask; ii++) {
+		while ((lnk = list_first(&ep->hash[ii]))) {
+			struct epitem *dpi = list_entry(lnk, struct epitem, llink);
+
+			file_notify_delcb(dpi->file, notify_proc);
+			list_del(lnk);
+			dpi_mem_free(dpi);
+		}
+	}
+	vfree(ep->hash);
+	if (ep->numpages > 0) {
+		ep_free_pages(ep->pages0, ep->numpages);
+		ep_free_pages(ep->pages1, ep->numpages);
+	}
+	unlock_kernel();
+}
+
+
+static inline struct epitem *ep_find_nl(struct eventpoll *ep, int fd)
+{
+	struct epitem *dpi = NULL;
+	struct list_head *lsthead, *lnk;
+
+	lsthead = &ep->hash[fd & ep->hmask];
+	list_for_each(lnk, lsthead) {
+		dpi = list_entry(lnk, struct epitem, llink);
+
+		if (dpi->pfd.fd == fd) break;
+		dpi = NULL;
+	}
+
+	DNPRINTK(3, (KERN_INFO "[%p] /dev/epoll: ep_find(%d) -> %p\n", current, fd, dpi));
+
+	return dpi;
+}
+
+
+static struct epitem *ep_find(struct eventpoll *ep, int fd)
+{
+	struct epitem *dpi;
+	unsigned long flags;
+
+	read_lock_irqsave(&ep->lock, flags);
+
+	dpi = ep_find_nl(ep, fd);
+
+	read_unlock_irqrestore(&ep->lock, flags);
+
+	return dpi;
+}
+
+
+static int ep_hashresize(struct eventpoll *ep, unsigned long *kflags)
+{
+	struct list_head *hash, *oldhash;
+	unsigned int hbits = ep->hbits + 1;
+	unsigned int hmask = (1 << hbits) - 1;
+	int ii, res, hentries = hmask + 1;
+	unsigned long flags = *kflags;
+
+	DNPRINTK(3, (KERN_INFO "[%p] /dev/epoll: ep_hashresize(%p) bits=%u\n", current, ep, hbits));
+
+	write_unlock_irqrestore(&ep->lock, flags);
+
+	res = -ENOMEM;
+	if (!(hash = (struct list_head *) vmalloc(hentries * sizeof(struct list_head)))) {
+		write_lock_irqsave(&ep->lock, flags);
+		goto out;
+	}
+
+	for (ii = 0; ii < hentries; ii++)
+		INIT_LIST_HEAD(&hash[ii]);
+
+	write_lock_irqsave(&ep->lock, flags);
+
+	oldhash = ep->hash;
+	for (ii = 0; ii <= ep->hmask; ii++) {
+		struct list_head *oldhead = &oldhash[ii], *lnk;
+
+		while ((lnk = list_first(oldhead))) {
+			struct epitem *dpi = list_entry(lnk, struct epitem, llink);
+
+			list_del(lnk);
+			list_add(lnk, &hash[dpi->pfd.fd & hmask]);
+		}
+	}
+
+	ep->hash = hash;
+	ep->hbits = hbits;
+	ep->hmask = hmask;
+
+	write_unlock_irqrestore(&ep->lock, flags);
+	vfree(oldhash);
+	write_lock_irqsave(&ep->lock, flags);
+
+	res = 0;
+out:
+	*kflags = flags;
+	atomic_dec(&ep->resize);
+	return res;
+}
+
+
+static int ep_insert(struct eventpoll *ep, struct pollfd *pfd)
+{
+	struct epitem *dpi;
+	struct file *file;
+	unsigned long flags;
+
+	if (atomic_read(&ep->hents) >= (ep->numpages * POLLFD_X_PAGE))
+		return -E2BIG;
+
+	if (!(file = fcheck(pfd->fd)))
+		return -EINVAL;
+
+	if (!(dpi = dpi_mem_alloc()))
+		return -ENOMEM;
+
+	INIT_LIST_HEAD(&dpi->llink);
+	dpi->ep = ep;
+	dpi->file = file;
+	dpi->pfd = *pfd;
+	dpi->index = -1;
+	dpi->ver = ep->ver - 1;
+
+	write_lock_irqsave(&ep->lock, flags);
+
+	list_add(&dpi->llink, &ep->hash[pfd->fd & ep->hmask]);
+	atomic_inc(&ep->hents);
+
+	if (!atomic_read(&ep->resize) &&
+			(atomic_read(&ep->hents) >> ep->hbits) > RESIZE_LENGTH &&
+			ep->hbits < MAX_HASH_BITS) {
+		atomic_inc(&ep->resize);
+		ep_hashresize(ep, &flags);
+	}
+
+	write_unlock_irqrestore(&ep->lock, flags);
+
+	file_notify_addcb(file, notify_proc, dpi);
+
+	DNPRINTK(3, (KERN_INFO "[%p] /dev/epoll: ep_insert(%p, %d)\n", current, ep, pfd->fd));
+
+	return 0;
+}
+
+
+static int ep_remove(struct eventpoll *ep, struct epitem *dpi)
+{
+	int fd = dpi->pfd.fd;
+	unsigned long flags;
+	struct pollfd *pfd, *lpfd;
+	struct epitem *ldpi;
+
+	file_notify_delcb(dpi->file, notify_proc);
+
+	write_lock_irqsave(&ep->lock, flags);
+
+	list_del(&dpi->llink);
+	atomic_dec(&ep->hents);
+
+	if (dpi->index >= 0 && dpi->ver == ep->ver && dpi->index < ep->eventcnt) {
+		pfd = (struct pollfd *) (ep->pages[EVENT_PAGE_INDEX(dpi->index)] +
+								 EVENT_PAGE_OFFSET(dpi->index));
+		if (pfd->fd == dpi->pfd.fd && dpi->index < --ep->eventcnt) {
+			lpfd = (struct pollfd *) (ep->pages[EVENT_PAGE_INDEX(ep->eventcnt)] +
+									  EVENT_PAGE_OFFSET(ep->eventcnt));
+			*pfd = *lpfd;
+
+			if ((ldpi = ep_find_nl(ep, pfd->fd))) ldpi->index = dpi->index;
+		}
+	}
+
+	write_unlock_irqrestore(&ep->lock, flags);
+
+	dpi_mem_free(dpi);
+
+	DNPRINTK(3, (KERN_INFO "[%p] /dev/epoll: ep_remove(%p, %d)\n", current, ep, fd));
+
+	return 0;
+}
+
+
+static void notify_proc(struct file *file, void *data, unsigned long *local, long *event)
+{
+	struct epitem *dpi = (struct epitem *) data;
+	struct eventpoll *ep = dpi->ep;
+	struct pollfd *pfd;
+
+	DNPRINTK(3, (KERN_INFO "[%p] /dev/epoll: notify(%p, %p, %ld, %ld) ep=%p\n",
+			current, file, data, event[0], event[1], ep));
+
+	write_lock(&ep->lock);
+	if (!(dpi->pfd.events & event[1]))
+		goto out;
+
+	if (dpi->index < 0 || dpi->ver != ep->ver) {
+		if (ep->eventcnt >= (ep->numpages * POLLFD_X_PAGE))
+			goto out;
+		dpi->index = ep->eventcnt++;
+		dpi->ver = ep->ver;
+		pfd = (struct pollfd *) (ep->pages[EVENT_PAGE_INDEX(dpi->index)] +
+				EVENT_PAGE_OFFSET(dpi->index));
+		*pfd = dpi->pfd;
+	} else {
+		pfd = (struct pollfd *) (ep->pages[EVENT_PAGE_INDEX(dpi->index)] +
+				EVENT_PAGE_OFFSET(dpi->index));
+		if (pfd->fd != dpi->pfd.fd) {
+			if (ep->eventcnt >= (ep->numpages * POLLFD_X_PAGE))
+				goto out;
+			dpi->index = ep->eventcnt++;
+			pfd = (struct pollfd *) (ep->pages[EVENT_PAGE_INDEX(dpi->index)] +
+					EVENT_PAGE_OFFSET(dpi->index));
+			*pfd = dpi->pfd;
+		}
+	}
+
+	pfd->revents |= (pfd->events & event[1]);
+
+	if (waitqueue_active(&ep->wq))
+		wake_up(&ep->wq);
+	if (waitqueue_active(&ep->poll_wait))
+		wake_up(&ep->poll_wait);
+out:
+	write_unlock(&ep->lock);
+}
+
+
+static int open_eventpoll(struct inode *inode, struct file *file)
+{
+	int res;
+	struct eventpoll *ep;
+
+	if (!(ep = kmalloc(sizeof(struct eventpoll), GFP_KERNEL)))
+		return -ENOMEM;
+
+	memset(ep, 0, sizeof(*ep));
+	if ((res = ep_init(ep))) {
+		kfree(ep);
+		return res;
+	}
+
+	file->private_data = ep;
+
+	MOD_INC_USE_COUNT;
+
+	DNPRINTK(3, (KERN_INFO "[%p] /dev/epoll: open() ep=%p\n", current, ep));
+	return 0;
+}
+
+
+static int close_eventpoll(struct inode *inode, struct file *file)
+{
+	struct eventpoll *ep = file->private_data;
+
+	ep_free(ep);
+
+	kfree(ep);
+
+	MOD_DEC_USE_COUNT;
+
+	DNPRINTK(3, (KERN_INFO "[%p] /dev/epoll: close() ep=%p\n", current, ep));
+	return 0;
+}
+
+
+static unsigned int poll_eventpoll(struct file *file, poll_table *wait)
+{
+	struct eventpoll *ep = file->private_data;
+
+	poll_wait(file, &ep->poll_wait, wait);
+	if (ep->eventcnt)
+		return POLLIN | POLLRDNORM;
+
+	return 0;
+}
+
+
+static int write_eventpoll(struct file *file, const char *buffer, size_t count,
+		loff_t *ppos)
+{
+	int res, rcount;
+	struct eventpoll *ep = file->private_data;
+	struct epitem *dpi;
+	struct pollfd pfd;
+
+	DNPRINTK(3, (KERN_INFO "[%p] /dev/epoll: write(%p, %d)\n", current, ep, count));
+
+	if (count % sizeof(struct pollfd))
+		return -EINVAL;
+
+	if ((res = verify_area(VERIFY_READ, buffer, count)))
+		return res;
+
+	rcount = 0;
+
+	lock_kernel();
+
+	while (count > 0) {
+		__copy_from_user(&pfd, buffer, sizeof(pfd));
+
+		dpi = ep_find(ep, pfd.fd);
+
+		if (pfd.fd >= current->files->max_fds || !current->files->fd[pfd.fd])
+			pfd.events = POLLREMOVE;
+		if (pfd.events & POLLREMOVE) {
+			if (dpi) {
+				ep_remove(ep, dpi);
+				rcount += sizeof(pfd);
+			}
+		}
+		else if (dpi) {
+			dpi->pfd.events = pfd.events;
+			rcount += sizeof(pfd);
+		} else {
+			pfd.revents = 0;
+			if (!ep_insert(ep, &pfd))
+				rcount += sizeof(pfd);
+		}
+
+		buffer += sizeof(pfd);
+		count -= sizeof(pfd);
+	}
+
+	unlock_kernel();
+
+	return rcount;
+}
+
+
+static int ep_poll(struct eventpoll *ep, void *arg)
+{
+	int res = 0;
+	long timeout;
+	unsigned long flags;
+	struct evpoll dvp;
+	wait_queue_t wait;
+
+	if (copy_from_user(&dvp, arg, sizeof(struct evpoll)))
+		return -EFAULT;
+
+	if (!atomic_read(&ep->mmapped))
+		return -EINVAL;
+
+	DNPRINTK(3, (KERN_INFO "[%p] /dev/epoll: ioctl(%p, EP_POLL, %d)\n", current, ep, dvp.ep_timeout));
+
+	write_lock_irqsave(&ep->lock, flags);
+
+	res = 0;
+	if (!ep->eventcnt) {
+		init_waitqueue_entry(&wait, current);
+		add_wait_queue(&ep->wq, &wait);
+		timeout = dvp.ep_timeout == -1 || dvp.ep_timeout > MAX_SCHEDULE_TIMEOUT/HZ ?
+			MAX_SCHEDULE_TIMEOUT: (dvp.ep_timeout * HZ) / 1000;
+		for (;;) {
+			if (ep->eventcnt || !timeout)
+				break;
+			if (signal_pending(current)) {
+				res = -EINTR;
+				break;
+			}
+
+			set_current_state(TASK_INTERRUPTIBLE);
+
+			write_unlock_irqrestore(&ep->lock, flags);
+			timeout = schedule_timeout(timeout);
+			write_lock_irqsave(&ep->lock, flags);
+		}
+		remove_wait_queue(&ep->wq, &wait);
+
+		set_current_state(TASK_RUNNING);
+	}
+
+	if (!res && ep->eventcnt) {
+		res = ep->eventcnt;
+		ep->eventcnt = 0;
+		++ep->ver;
+		if (ep->pages == ep->pages0) {
+			ep->pages = ep->pages1;
+			dvp.ep_resoff = 0;
+		} else {
+			ep->pages = ep->pages0;
+			dvp.ep_resoff = ep->numpages * PAGE_SIZE;
+		}
+	}
+
+	write_unlock_irqrestore(&ep->lock, flags);
+
+	if (res > 0)
+		copy_to_user(arg, &dvp, sizeof(struct evpoll));
+
+	DNPRINTK(3, (KERN_INFO "[%p] /dev/epoll: ioctl(%p, EP_POLL, %d) == %d\n", current, ep, dvp.ep_timeout, res));
+	return res;
+}
+
+
+static int ioctl_eventpoll(struct inode *inode, struct file *file,
+		unsigned int cmd, unsigned long arg)
+{
+	int res, numpages;
+	struct eventpoll *ep = file->private_data;
+	struct epitem *dpi;
+	unsigned long flags;
+	struct pollfd pfd;
+
+	switch (cmd) {
+	case EP_ALLOC:
+		if (atomic_read(&ep->mmapped))
+			return -EBUSY;
+
+		numpages = EP_FDS_PAGES(arg);
+		if (numpages > MAX_EVENTPOLL_PAGES)
+			return -EINVAL;
+
+		res = 0;
+		write_lock_irqsave(&ep->lock, flags);
+		if (numpages > ep->numpages) {
+			if (!(res = ep_alloc_pages(&ep->pages0[ep->numpages], numpages - ep->numpages))) {
+				if (!(res = ep_alloc_pages(&ep->pages1[ep->numpages], numpages - ep->numpages))) {
+					ep->numpages = numpages;
+				} else {
+					ep_free_pages(&ep->pages0[ep->numpages], numpages - ep->numpages);
+				}
+			}
+		}
+		write_unlock_irqrestore(&ep->lock, flags);
+
+		DNPRINTK(3, (KERN_INFO "[%p] /dev/epoll: ioctl(%p, EP_ALLOC, %lu) == %d\n",
+					 current, ep, arg, res));
+		return res;
+
+	case EP_FREE:
+		if (atomic_read(&ep->mmapped))
+			return -EBUSY;
+
+		res = -EINVAL;
+		write_lock_irqsave(&ep->lock, flags);
+		if (ep->numpages > 0) {
+			ep_free_pages(ep->pages0, ep->numpages);
+			ep_free_pages(ep->pages1, ep->numpages);
+			ep->numpages = 0;
+			ep->pages = ep->pages0;
+			res = 0;
+		}
+		write_unlock_irqrestore(&ep->lock, flags);
+
+		DNPRINTK(3, (KERN_INFO "[%p] /dev/epoll: ioctl(%p, EP_FREE) == %d\n",
+					 current, ep, res));
+		return res;
+
+	case EP_POLL:
+		return ep_poll(ep, (void *) arg);
+
+	case EP_ISPOLLED:
+		if (copy_from_user(&pfd, (void *) arg, sizeof(struct pollfd)))
+			return 0;
+
+		read_lock_irqsave(&ep->lock, flags);
+
+		res = 0;
+		if (!(dpi = ep_find_nl(ep, pfd.fd)))
+			goto out_ispolled;
+
+		pfd = dpi->pfd;
+		res = 1;
+
+	out_ispolled:
+		read_unlock_irqrestore(&ep->lock, flags);
+
+		if (res)
+			copy_to_user((void *) arg, &pfd, sizeof(struct pollfd));
+
+		DNPRINTK(3, (KERN_INFO "[%p] /dev/epoll: ioctl(%p, EP_ISPOLLED, %d) == %d\n",
+					 current, ep, pfd.fd, res));
+		return res;
+	}
+
+	return -EINVAL;
+}
+
+
+static void eventpoll_mm_open(struct vm_area_struct * vma)
+{
+	struct file *file = vma->vm_file;
+	struct eventpoll *ep = file->private_data;
+
+	if (ep) atomic_inc(&ep->mmapped);
+
+	DNPRINTK(3, (KERN_INFO "[%p] /dev/epoll: mm_open(%p)\n", current, ep));
+}
+
+
+static void eventpoll_mm_close(struct vm_area_struct * vma)
+{
+	struct file *file = vma->vm_file;
+	struct eventpoll *ep = file->private_data;
+
+	if (ep) atomic_dec(&ep->mmapped);
+
+	DNPRINTK(3, (KERN_INFO "[%p] /dev/epoll: mm_close(%p)\n", current, ep));
+}
+
+
+static int mmap_eventpoll(struct file *file, struct vm_area_struct *vma)
+{
+	struct eventpoll *ep = file->private_data;
+	unsigned long start, flags;
+	int ii, res;
+	int numpages;
+	size_t mapsize;
+
+	DNPRINTK(3, (KERN_INFO "[%p] /dev/epoll: mmap(%p, %lx, %lx)\n",
+			current, ep, vma->vm_start, vma->vm_pgoff << PAGE_SHIFT));
+
+	if ((vma->vm_pgoff << PAGE_SHIFT) != 0)
+		return -EINVAL;
+
+	mapsize = PAGE_ALIGN(vma->vm_end - vma->vm_start);
+	numpages = mapsize >> PAGE_SHIFT;
+
+	write_lock_irqsave(&ep->lock, flags);
+
+	res = -EINVAL;
+	if (numpages != (2 * ep->numpages))
+		goto out;
+
+	start = vma->vm_start;
+	for (ii = 0; ii < ep->numpages; ii++) {
+		if (remap_page_range(start, __pa(ep->pages0[ii]),
+				PAGE_SIZE, vma->vm_page_prot))
+    		goto out;
+		start += PAGE_SIZE;
+	}
+	for (ii = 0; ii < ep->numpages; ii++) {
+		if (remap_page_range(start, __pa(ep->pages1[ii]),
+				PAGE_SIZE, vma->vm_page_prot))
+    		goto out;
+		start += PAGE_SIZE;
+	}
+	vma->vm_ops = &eventpoll_mmap_ops;
+	atomic_set(&ep->mmapped, 1);
+	res = 0;
+out:
+	write_unlock_irqrestore(&ep->lock, flags);
+
+	DNPRINTK(3, (KERN_INFO "[%p] /dev/epoll: mmap(%p, %lx, %lx) == %d\n",
+		 	current, ep, vma->vm_start, vma->vm_pgoff << PAGE_SHIFT, res));
+	return res;
+}
+
+
+int __init eventpoll_init(void)
+{
+	dpi_cache = kmem_cache_create("eventpoll",
+			sizeof(struct epitem),
+			__alignof__(struct epitem),
+			DPI_SLAB_DEBUG, NULL, NULL);
+	if (!dpi_cache) {
+		printk(KERN_INFO "[%p] /dev/epoll: driver install failed.\n", current);
+		return -ENOMEM;
+	}
+
+	printk(KERN_INFO "[%p] /dev/epoll: driver installed.\n", current);
+
+	misc_register(&eventpoll);
+
+	return 0;
+}
+
+
+module_init(eventpoll_init);
+
+#ifdef MODULE
+
+void cleanup_module(void)
+{
+	misc_deregister(&eventpoll);
+	kmem_cache_destroy(dpi_cache);
+}
+
+#endif
+
+MODULE_LICENSE("GPL"); 
+
diff -urN v2.4.19-pre5/drivers/char/mem.c linux.diff/drivers/char/mem.c
--- v2.4.19-pre5/drivers/char/mem.c	Wed Apr  3 21:04:30 2002
+++ linux.diff/drivers/char/mem.c	Tue Apr  2 18:56:58 2002
@@ -25,6 +25,7 @@
 #include <asm/uaccess.h>
 #include <asm/io.h>
 #include <asm/pgalloc.h>
+#include <asm/a.out.h>
 
 #ifdef CONFIG_I2C
 extern int i2c_init_all(void);
@@ -572,6 +573,87 @@
 	write:		write_full,
 };
 
+void vsys_mmap_close(struct vm_area_struct *area)
+{
+	area->vm_mm->vsys_mapped = 0;
+}
+
+static struct vm_operations_struct vsys_mmap_ops = {
+	close:		vsys_mmap_close,
+};
+
+int vsys_mmap(struct file *file, struct vm_area_struct *vma)
+{
+	extern unsigned char vsyscall_text_begin, vsyscall_text_end, VSYSCALL_text[];
+	unsigned long offset = vma->vm_pgoff << PAGE_SHIFT;
+	unsigned long len = vma->vm_end - vma->vm_start;
+	unsigned long actual_len = &vsyscall_text_end - &vsyscall_text_begin;
+
+	if ((offset + len) > actual_len)
+		len = actual_len - offset;
+
+	pr_debug("len = 0x%lx, actual_len = 0x%lx\n", len, actual_len);
+
+	vma->vm_start = (unsigned long)&vsyscall_text_begin + offset;
+	vma->vm_end = vma->vm_start + len;
+	vma->vm_flags |= VM_RESERVED;
+
+	pr_debug("vm_start = 0x%lx, vm_end = 0x%lx\n",
+		vma->vm_start, vma->vm_end);
+	pr_debug("va=%p  pa=0x%lx\n",
+		VSYSCALL_text + offset,
+		__pa(VSYSCALL_text) + offset);
+
+	if (vma->vm_start < (unsigned long)&vsyscall_text_begin) {
+		pr_debug("vsys_mmap: start < begin\n");
+		return -EINVAL;
+	}
+
+	if (vma->vm_end < (unsigned long)&vsyscall_text_begin) {
+		pr_debug("vsys_mmap: end < begin\n");
+		return -EINVAL;
+	}
+
+	if (vma->vm_end > (unsigned long)&vsyscall_text_end) {
+		pr_debug("vsys_mmap: end(%lx) > text_end(%p)\n",
+			vma->vm_end, &vsyscall_text_end);
+		return -EINVAL;
+	}
+
+	if (vma->vm_start >= vma->vm_end) {
+		pr_debug("vsys_mmap: end\n");
+		return -EINVAL;
+	}
+
+	if (find_vma_intersection(current->mm, vma->vm_start, vma->vm_end)) {
+		pr_debug("vsyscall: mapping collision\n");
+		return -EINVAL;
+	}
+
+	if ((vma->vm_flags & (VM_SHARED | VM_WRITE)) == (VM_SHARED | VM_WRITE)) {
+		pr_debug("vsyscall: attempt to write to mapping\n");
+		return -EPERM;
+	}
+
+	if (remap_page_range(vma->vm_start,
+			     __pa(VSYSCALL_text) + offset,
+			     vma->vm_end-vma->vm_start,
+			     vma->vm_page_prot))
+		return -EAGAIN;
+
+	pr_debug("VSYSCALL_text(%p): %02x %02x %02x %02x\n",
+		VSYSCALL_text,
+		VSYSCALL_text[0], VSYSCALL_text[1],
+		VSYSCALL_text[2], VSYSCALL_text[3]);
+
+	current->mm->vsys_mapped = 1;
+	return 0;
+}
+
+static struct file_operations vsys_fops = {
+	mmap:		vsys_mmap,
+};
+
 static int memory_open(struct inode * inode, struct file * filp)
 {
 	switch (MINOR(inode->i_rdev)) {
@@ -601,6 +683,9 @@
 		case 9:
 			filp->f_op = &urandom_fops;
 			break;
+		case 10:
+			filp->f_op = &vsys_fops;
+			break;
 		default:
 			return -ENXIO;
 	}
@@ -627,7 +712,8 @@
 	{5, "zero",    S_IRUGO | S_IWUGO,           &zero_fops},
 	{7, "full",    S_IRUGO | S_IWUGO,           &full_fops},
 	{8, "random",  S_IRUGO | S_IWUSR,           &random_fops},
-	{9, "urandom", S_IRUGO | S_IWUSR,           &urandom_fops}
+	{9, "urandom", S_IRUGO | S_IWUSR,           &urandom_fops},
+	{10,"vsys",    S_IRUGO,			    &vsys_fops},
     };
     int i;
 
diff -urN v2.4.19-pre5/drivers/char/raw.c linux.diff/drivers/char/raw.c
--- v2.4.19-pre5/drivers/char/raw.c	Mon Sep 24 02:16:03 2001
+++ linux.diff/drivers/char/raw.c	Tue Apr  2 18:56:58 2002
@@ -16,6 +16,8 @@
 #include <linux/capability.h>
 #include <linux/smp_lock.h>
 #include <asm/uaccess.h>
+#include <linux/kiovec.h>
+#include <linux/slab.h>
 
 #define dprintk(x...) 
 
@@ -34,13 +36,18 @@
 int	raw_open(struct inode *, struct file *);
 int	raw_release(struct inode *, struct file *);
 int	raw_ctl_ioctl(struct inode *, struct file *, unsigned int, unsigned long);
-
+int	raw_kvec_read(struct file *filp, kvec_cb_t cb, size_t size, loff_t pos);
+int	raw_kvec_write(struct file *filp, kvec_cb_t cb, size_t size, loff_t pos);
 
 static struct file_operations raw_fops = {
 	read:		raw_read,
 	write:		raw_write,
 	open:		raw_open,
 	release:	raw_release,
+	aio_read:	generic_file_aio_read,
+	aio_write:	generic_file_aio_write,
+	kvec_read:	raw_kvec_read,
+	kvec_write:	raw_kvec_write,
 };
 
 static struct file_operations raw_ctl_fops = {
@@ -250,7 +257,6 @@
 }
 
 
-
 ssize_t	raw_read(struct file *filp, char * buf, 
 		 size_t size, loff_t *offp)
 {
@@ -381,3 +387,99 @@
  out:	
 	return err;
 }
+
+static int raw_kvec_rw(struct file *filp, int rw, kvec_cb_t cb, size_t size, loff_t pos);
+int raw_kvec_read(struct file *file, kvec_cb_t cb, size_t size, loff_t pos)
+{
+	return raw_kvec_rw(file, READ, cb, size, pos);
+}
+
+int raw_kvec_write(struct file *file, kvec_cb_t cb, size_t size, loff_t pos)
+{
+	return raw_kvec_rw(file, WRITE, cb, size, pos);
+}
+
+int	raw_kvec_rw(struct file *filp, int rw, kvec_cb_t cb, size_t size, loff_t pos)
+{
+	int		err;
+	unsigned	minor;
+	kdev_t		dev;
+	unsigned long	limit, blocknr, blocks;
+
+	unsigned	sector_size, sector_bits, sector_mask;
+	unsigned	max_sectors;
+	unsigned	i;
+
+	pr_debug("raw_kvec_rw: %p %d %d %Lu\n", filp, rw, (int)size, pos);
+	/*
+	 * First, a few checks on device size limits 
+	 */
+
+	minor = MINOR(filp->f_dentry->d_inode->i_rdev);
+	dev = to_kdev_t(raw_devices[minor].binding->bd_dev);
+	sector_size = raw_devices[minor].sector_size;
+	sector_bits = raw_devices[minor].sector_bits;
+	sector_mask = sector_size- 1;
+	max_sectors = 25000; /* FIXME: KIO_MAX_SECTORS >> (sector_bits - 9) */
+	
+	if (blk_size[MAJOR(dev)])
+		limit = (((loff_t) blk_size[MAJOR(dev)][MINOR(dev)]) << BLOCK_SIZE_BITS) >> sector_bits;
+	else
+		limit = INT_MAX;
+	pr_debug ("raw_kvec_rw: dev %d:%d (+%lu)\n",
+		 MAJOR(dev), MINOR(dev), limit);
+
+	/* EOF at the end */
+	err = 0;
+	if (!size || (pos >> sector_bits) == limit) {
+		pr_debug("raw_kvec_rw: %Lu == %lu, EOF, %d\n", pos >> sector_bits, limit, sector_bits);
+		cb.fn(cb.data, cb.vec, err);
+		return 0;
+	}
+
+	/* ENXIO for io beyond the end */
+	err = -ENXIO;
+	if ((pos >> sector_bits) >= limit) {
+		pr_debug("raw_kvec_rw: %Lu > %lu, %d\n", pos >> sector_bits, limit, sector_bits);
+		goto out;
+	}
+
+	err = -EINVAL;
+	if ((pos < 0) || (pos & sector_mask) || (size & sector_mask)) {
+		pr_debug("pos(%Ld)/size(%lu) wrong(%d)\n", pos, size, sector_mask);
+		goto out;
+	}
+
+	/* Verify that the scatter-gather list is sector aligned. */
+	for (i=0; i<cb.vec->nr; i++)
+		if ((cb.vec->veclet[i].offset & sector_mask) ||
+		    (cb.vec->veclet[i].length & sector_mask)) {
+			pr_debug("veclet offset/length wrong");
+			goto out;
+		}
+
+	/*
+	 * Split the IO into KIO_MAX_SECTORS chunks, mapping and
+	 * unmapping the single kiobuf as we go to perform each chunk of
+	 * IO.  
+	 */
+
+	blocknr = pos >> sector_bits;
+	blocks = size >> sector_bits;
+	if (blocks > max_sectors)
+		blocks = max_sectors;
+	if (blocks > limit - blocknr)
+		blocks = limit - blocknr;
+	err = -ENXIO;
+	if (!blocks) {
+		pr_debug("raw: !blocks %d %ld %ld\n", max_sectors, limit, blocknr);
+		goto out;
+	}
+
+	err = brw_kvec_async(rw, cb, dev, blocks, blocknr, sector_bits);
+out:
+	if (err)
+		printk(KERN_DEBUG "raw_kvec_rw: ret is %d\n", err);
+	return err;
+}
+
diff -urN v2.4.19-pre5/fs/Makefile linux.diff/fs/Makefile
--- v2.4.19-pre5/fs/Makefile	Thu Mar  7 16:40:03 2002
+++ linux.diff/fs/Makefile	Tue Apr 30 17:29:31 2002
@@ -7,12 +7,12 @@
 
 O_TARGET := fs.o
 
-export-objs :=	filesystems.o open.o dcache.o buffer.o
+export-objs :=	filesystems.o open.o dcache.o buffer.o fcblist.o
 mod-subdirs :=	nls
 
 obj-y :=	open.o read_write.o devices.o file_table.o buffer.o \
 		super.o block_dev.o char_dev.o stat.o exec.o pipe.o namei.o \
-		fcntl.o ioctl.o readdir.o select.o fifo.o locks.o \
+		fcntl.o ioctl.o readdir.o select.o fifo.o locks.o fcblist.o \
 		dcache.o inode.o attr.o bad_inode.o file.o iobuf.o dnotify.o \
 		filesystems.o namespace.o seq_file.o
 
@@ -22,6 +22,9 @@
 obj-y += noquot.o
 endif
 
+obj-y += aio.o
+export-objs += aio.o
+
 subdir-$(CONFIG_PROC_FS)	+= proc
 subdir-y			+= partitions
 
diff -urN v2.4.19-pre5/fs/aio.c linux.diff/fs/aio.c
--- v2.4.19-pre5/fs/aio.c	Wed Dec 31 19:00:00 1969
+++ linux.diff/fs/aio.c	Tue May 14 13:15:31 2002
@@ -0,0 +1,1449 @@
+/* fs/aio.c
+ *	An async IO implementation for Linux
+ *	Written by Benjamin LaHaise <bcrl@redhat.com>
+ *
+ *	Implements an efficient asynchronous io interface.
+ *
+ *	Copyright 2000, 2001, 2002 Red Hat, Inc.  All Rights Reserved.
+ *
+ *   This program is free software; you can redistribute it and/or modify
+ *   it under the terms of the GNU General Public License as published by
+ *   the Free Software Foundation; either version 2 of the License, or
+ *   (at your option) any later version.
+ *
+ *   This program is distributed in the hope that it will be useful,
+ *   but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *   GNU General Public License for more details.
+ *
+ *   You should have received a copy of the GNU General Public License along
+ *   with this program; if not, write to the Free Software Foundation, Inc.,
+ *   59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ *
+ */
+//#define DEBUG 1
+
+#include <linux/sched.h>
+#include <linux/fs.h>
+#include <linux/file.h>
+#include <linux/mm.h>
+#include <linux/mman.h>
+#include <linux/vmalloc.h>
+#include <linux/iobuf.h>
+#include <linux/slab.h>
+#include <linux/timer.h>
+#include <linux/brlock.h>
+#include <linux/aio.h>
+#include <linux/smp_lock.h>
+#include <linux/compiler.h>
+#include <linux/poll.h>
+#include <linux/brlock.h>
+#include <linux/module.h>
+
+#include <asm/uaccess.h>
+#include <asm/vsyscall.h>
+
+#if DEBUG > 1
+#define dprintk		printk
+#else
+#define dprintk(x...)	do { ; } while (0)
+#endif
+
+/*------ sysctl variables----*/
+unsigned aio_nr;		/* current system wide number of aio requests */
+unsigned aio_max_nr = 0x10000;	/* system wide maximum number of aio requests */
+unsigned aio_max_size = 0x20000;	/* 128KB per chunk */
+unsigned aio_max_pinned;		/* set to mem/4 in aio_setup */
+/*----end sysctl variables---*/
+
+static kmem_cache_t	*kiocb_cachep;
+static kmem_cache_t	*kioctx_cachep;
+
+/* tunable.  Needs to be added to sysctl. */
+int max_aio_reqs = 0x10000;
+
+/* Used for rare fput completion. */
+static void aio_fput_routine(void *);
+static struct tq_struct	fput_tqueue = {
+	routine:	aio_fput_routine,
+};
+
+static spinlock_t	fput_lock = SPIN_LOCK_UNLOCKED;
+static LIST_HEAD(fput_head);
+
+/* forward prototypes */
+static void generic_aio_complete_read(void *_iocb, struct kvec *vec, ssize_t res);
+static void generic_aio_complete_write(void *_iocb, struct kvec *vec, ssize_t res);
+
+/* aio_setup
+ *	Creates the slab caches used by the aio routines, panic on
+ *	failure as this is done early during the boot sequence.
+ */
+static int __init aio_setup(void)
+{
+	kiocb_cachep = kmem_cache_create("kiocb", sizeof(struct kiocb),
+				0, SLAB_HWCACHE_ALIGN, NULL, NULL);
+	if (!kiocb_cachep)
+		panic("unable to create kiocb cache\n");
+
+	kioctx_cachep = kmem_cache_create("kioctx", sizeof(struct kioctx),
+				0, SLAB_HWCACHE_ALIGN, NULL, NULL);
+	if (!kioctx_cachep)
+		panic("unable to create kioctx cache");
+
+	aio_max_pinned = num_physpages/4;
+
+	printk(KERN_NOTICE "aio_setup: num_physpages = %u\n", aio_max_pinned);
+	printk(KERN_NOTICE "aio_setup: sizeof(struct page) = %d\n", (int)sizeof(struct page));
+
+	return 0;
+}
+
+static void ioctx_free_reqs(struct kioctx *ctx)
+{
+	struct list_head *pos, *next;
+	list_for_each_safe(pos, next, &ctx->free_reqs) {
+		struct kiocb *iocb = list_kiocb(pos);
+		list_del(&iocb->list);
+		kmem_cache_free(kiocb_cachep, iocb);
+	}
+}
+
+static void aio_free_ring(struct kioctx *ctx)
+{
+	struct aio_ring_info *info = &ctx->ring_info;
+	int i;
+
+	if (info->kvec) {
+		unmap_kvec(info->kvec, 1);
+		free_kvec(info->kvec);
+	}
+
+	if (info->mmap_size) {
+		down_write(&ctx->mm->mmap_sem);
+		do_munmap(ctx->mm, info->mmap_base, info->mmap_size);
+		up_write(&ctx->mm->mmap_sem);
+	}
+
+#if 0
+	for (i=0; i<info->nr_pages; i++) {
+		if (info->pages[i])
+			free_page((unsigned long)info->pages[i]);
+		info->pages[i] = NULL;
+	}
+#endif
+
+	if (info->pages && info->pages != info->internal_pages)
+		kfree(info->pages);
+	info->pages = NULL;
+	info->nr = 0;
+}
+
+struct page *aio_ring_nopage(struct vm_area_struct *vma, unsigned long address, int unused)
+{
+	struct kioctx *ctx = vma->vm_file->private_data;
+	struct aio_ring_info *info = &ctx->ring_info;
+	struct page *page = NULL;
+	unsigned long offset = address - vma->vm_start;
+	offset >>= PAGE_SHIFT;
+	offset += vma->vm_pgoff;
+	if (offset < info->nr_pages)
+		page = virt_to_page(info->pages[offset]);
+
+	printk(KERN_DEBUG "aio_ring_nopage(%lu) = pages[%lu] = %p\n", address, offset, page);
+	return page;
+}
+
+struct vm_operations_struct aio_ring_vops = {
+	nopage:	aio_ring_nopage,
+};
+
+int aio_ring_mmap(struct file *file, struct vm_area_struct *vma)
+{
+	struct kioctx *ctx = file->private_data;
+	struct aio_ring_info *info = &ctx->ring_info;
+
+	if (vma->vm_pgoff)
+		BUG();
+	if (vma->vm_end - vma->vm_start != info->mmap_size)
+		BUG();
+	vma->vm_ops = &aio_ring_vops;
+	return 0;
+}
+
+struct file_operations aio_ring_fops = {
+	mmap:	aio_ring_mmap,
+};
+
+static int aio_setup_ring(struct kioctx *ctx)
+{
+	struct aio_ring_info *info = &ctx->ring_info;
+	unsigned nr_reqs = ctx->max_reqs;
+	unsigned long size;
+	int failed = 0;
+	int nr_pages, i;
+
+	/* Compensate for the ring buffer's head/tail overlap entry */
+	nr_reqs += 2;	/* 1 is required, 2 for good luck */
+
+	size = sizeof(struct aio_ring);
+	size += sizeof(struct io_event) * nr_reqs;
+	nr_pages = (size + PAGE_SIZE-1) >> PAGE_SHIFT;
+
+	if (nr_pages < 0)
+		return -EINVAL;
+
+	info->nr_pages = nr_pages;
+
+	nr_reqs = (PAGE_SIZE * nr_pages - sizeof(struct aio_ring)) / sizeof(struct io_event);
+
+	info->nr = 0;
+	info->pages = info->internal_pages;
+	if (nr_pages > AIO_RING_PAGES) {
+		info->pages = kmalloc(sizeof(void *) * nr_pages, GFP_KERNEL);
+		if (!info->pages)
+			return -ENOMEM;
+		memset(info->pages, 0, sizeof(void *) * nr_pages);
+	}
+
+#if 0
+	for (i=0; i<nr_pages; i++) {
+		info->pages[i] = (void *)get_zeroed_page(GFP_KERNEL);
+		failed |= (info->pages[i] == 0);
+	}
+
+	if (unlikely(failed)) {
+		for (i=0; i<nr_pages; i++) {
+			if (info->pages[i])
+				free_page((unsigned long)info->pages[i]);
+			info->pages[i] = 0;
+		}
+		return -ENOMEM;
+	}
+#endif
+#if 0
+	/* FIXME: this is horrifically insecure */
+	info->mmap_file = get_empty_filp();
+	if (!info->mmap_file) {
+		aio_free_ring(ctx);
+		return -ENOMEM;
+	}
+	{
+		struct inode *inode = get_empty_inode();
+		struct qstr str;
+		str.name = "foo";
+		str.len = 3;
+		str.hash = 0;
+		info->mmap_file->f_dentry = d_alloc(NULL, &str);
+		if (!info->mmap_file->f_dentry)
+			BUG();
+		d_add(info->mmap_file->f_dentry, inode);
+	}
+	info->mmap_file->f_mode = FMODE_READ | FMODE_WRITE;
+	info->mmap_file->private_data = ctx;
+	info->mmap_file->f_op = &aio_ring_fops;
+#endif
+	info->mmap_size = nr_pages * PAGE_SIZE;
+	printk(KERN_DEBUG "attempting mmap of %lu bytes\n", info->mmap_size);
+	down_write(&ctx->mm->mmap_sem);
+	info->mmap_base = do_mmap(NULL, 0, info->mmap_size, 
+				  PROT_READ|PROT_WRITE, MAP_ANON|MAP_PRIVATE,
+				  0);
+	up_write(&ctx->mm->mmap_sem);
+	if (IS_ERR(info->mmap_base)) {
+		printk(KERN_DEBUG "mmap err: %ld\n", -info->mmap_base);
+		info->mmap_size = 0;
+		aio_free_ring(ctx);
+		return -EAGAIN;
+	}
+	printk(KERN_DEBUG "mmap address: 0x%08lx\n", info->mmap_base);
+	info->kvec = map_user_kvec(READ, info->mmap_base, info->mmap_size);
+	if (IS_ERR(info->kvec))
+		BUG();	/* FIXME */
+
+	if (info->kvec->nr != nr_pages)
+		BUG();
+
+	for (i=0; i<nr_pages; i++) {
+		info->pages[i] = page_address(info->kvec->veclet[i].page);	/* FIXME! Will not work on HIGHMEM */
+		//printk("[%d] %p -> %p\n", i, info->kvec->veclet[i].page,
+		//	info->pages[i]);
+	}
+
+
+	ctx->user_id = info->mmap_base;
+
+	info->ring = (struct aio_ring *)info->pages[0];
+	info->nr = nr_reqs;		/* trusted copy */
+	info->ring->nr = nr_reqs;	/* user copy */
+	info->ring->id = ctx->user_id;
+
+	return 0;
+}
+
+static inline struct io_event *aio_ring_event(struct aio_ring_info *info, int nr)
+{
+#define AIO_EVENTS_PER_PAGE	(PAGE_SIZE / sizeof(struct io_event))
+#define AIO_EVENTS_FIRST_PAGE	((PAGE_SIZE - sizeof(struct aio_ring)) / sizeof(struct io_event))
+
+	if (nr < AIO_EVENTS_FIRST_PAGE)
+		return &info->ring->io_events[nr];
+	nr -= AIO_EVENTS_FIRST_PAGE;
+	return ((struct io_event *)info->pages[1 + nr / AIO_EVENTS_PER_PAGE]) +
+		(nr % AIO_EVENTS_PER_PAGE);
+}
+
+/* ioctx_alloc
+ *	Allocates and initializes an ioctx.  Returns an ERR_PTR if it failed.
+ */
+static struct kioctx *ioctx_alloc(unsigned nr_reqs)
+{
+	struct kioctx *ctx;
+	unsigned i;
+
+	/* Prevent overflows */
+	if ((nr_reqs > (0x10000000U / sizeof(struct io_event))) ||
+	    (nr_reqs > (0x10000000U / sizeof(struct kiocb)))) {
+		pr_debug("ENOMEM: nr_reqs too high\n");
+		return ERR_PTR(-EINVAL);
+	}
+
+	if (nr_reqs > aio_max_nr)
+		return ERR_PTR(-EAGAIN);
+
+	ctx = kmem_cache_alloc(kioctx_cachep, GFP_KERNEL);
+	if (!ctx)
+		return ERR_PTR(-ENOMEM);
+
+	memset(ctx, 0, sizeof(*ctx));
+	ctx->max_reqs = nr_reqs;
+	ctx->mm = current->mm;
+	atomic_inc(&ctx->mm->mm_count);
+
+	atomic_set(&ctx->users, 1);
+	spin_lock_init(&ctx->lock);
+	spin_lock_init(&ctx->ring_lock);
+	init_waitqueue_head(&ctx->wait);
+
+	INIT_LIST_HEAD(&ctx->free_reqs);
+	INIT_LIST_HEAD(&ctx->active_reqs);
+	//ctx->user_id = ++current->mm->new_ioctx_id;
+
+	if (aio_setup_ring(ctx) < 0)
+		goto out_freectx;
+
+	/* Allocate nr_reqs iocbs for io.  Free iocbs are on the 
+	 * ctx->free_reqs list.  When active they migrate to the 
+	 * active_reqs list.  During completion and cancellation 
+	 * the request may temporarily not be on any list.
+	 */
+	for (i=0; i<nr_reqs; i++) {
+		struct kiocb *iocb = kmem_cache_alloc(kiocb_cachep, GFP_KERNEL);
+		if (!iocb)
+			goto out_freering;
+		memset(iocb, 0, sizeof(*iocb));
+		iocb->key = i;
+		iocb->users = 0;
+		list_add(&iocb->list, &ctx->free_reqs);
+	}
+
+	/* now link into global list.  kludge.  FIXME */
+	br_write_lock(BR_AIO_REQ_LOCK);			
+	if (unlikely(aio_nr + ctx->max_reqs > aio_max_nr))
+		goto out_cleanup;
+	aio_nr += ctx->max_reqs;	/* undone by __put_ioctx */
+	ctx->next = current->mm->ioctx_list;
+	current->mm->ioctx_list = ctx;
+	br_write_unlock(BR_AIO_REQ_LOCK);
+
+	dprintk("aio: allocated ioctx %p[%ld]: mm=%p mask=0x%x\n",
+		ctx, ctx->user_id, current->mm, ctx->ring_info.ring->nr);
+	return ctx;
+
+out_cleanup:
+	br_write_unlock(BR_AIO_REQ_LOCK);
+	ctx->max_reqs = 0;	/* prevent __put_ioctx from sub'ing aio_nr */
+	__put_ioctx(ctx);
+	return ERR_PTR(-EAGAIN);
+
+out_freering:
+	aio_free_ring(ctx);
+out_freereqs:
+	ioctx_free_reqs(ctx);
+out_freectx:
+	kmem_cache_free(kioctx_cachep, ctx);
+	ctx = ERR_PTR(-ENOMEM);
+
+	dprintk("aio: error allocating ioctx %p\n", ctx);
+	return ctx;
+}
+
+/* aio_cancel_all
+ *	Cancels all outstanding aio requests on an aio context.  Used 
+ *	when the processes owning a context have all exited to encourage 
+ *	the rapid destruction of the kioctx.
+ */
+static void aio_cancel_all(struct kioctx *ctx)
+{
+	int (*cancel)(struct kiocb *);
+	spin_lock_irq(&ctx->lock);
+	ctx->dead = 1;
+	while (!list_empty(&ctx->active_reqs)) {
+		struct list_head *pos = ctx->active_reqs.next;
+		struct kiocb *iocb = list_kiocb(pos);
+		list_del_init(&iocb->list);
+		cancel = iocb->cancel;
+		if (cancel)
+			iocb->users++;
+		spin_unlock_irq(&ctx->lock);
+		if (cancel)
+			cancel(iocb);
+		spin_lock_irq(&ctx->lock);
+	}
+	spin_unlock_irq(&ctx->lock);
+}
+
+void wait_for_all_aios(struct kioctx *ctx)
+{
+	struct task_struct *tsk = current;
+	DECLARE_WAITQUEUE(wait, tsk);
+
+	if (!ctx->reqs_active)
+		return;
+
+	add_wait_queue(&ctx->wait, &wait);
+	set_task_state(tsk, TASK_UNINTERRUPTIBLE);
+	while (ctx->reqs_active) {
+		printk(KERN_DEBUG "ctx->reqs_active = %d\n", ctx->reqs_active);
+		schedule();
+		set_task_state(tsk, TASK_UNINTERRUPTIBLE);
+	}
+	set_task_state(tsk, TASK_RUNNING);
+	remove_wait_queue(&ctx->wait, &wait);
+}
+
+/* exit_aio: called when the last user of mm goes away.  At this point, 
+ * there is no way for any new requests to be submited or any of the 
+ * io_* syscalls to be called on the context.  However, there may be 
+ * outstanding requests which hold references to the context; as they 
+ * go away, they will call put_ioctx and release any pinned memory
+ * associated with the request (held via struct page * references).
+ */
+void exit_aio(struct mm_struct *mm)
+{
+	struct kioctx *ctx = mm->ioctx_list;
+	mm->ioctx_list = NULL;
+	while (ctx) {
+		struct kioctx *next = ctx->next;
+		ctx->next = NULL;
+		aio_cancel_all(ctx);
+
+		wait_for_all_aios(ctx);
+
+		if (1 != atomic_read(&ctx->users))
+			printk(KERN_DEBUG
+				"exit_aio:ioctx still alive: %d %d %d\n",
+				atomic_read(&ctx->users), ctx->dead,
+				ctx->reqs_active);
+		put_ioctx(ctx);
+		ctx = next;
+	}
+}
+
+/* __put_ioctx
+ *	Called when the last user of an aio context has gone away,
+ *	and the struct needs to be freed.
+ */
+void __put_ioctx(struct kioctx *ctx)
+{
+	unsigned nr_reqs = ctx->max_reqs;
+
+	if (ctx->reqs_active)
+		BUG();
+
+	aio_free_ring(ctx);
+	mmdrop(ctx->mm);
+	ctx->mm = NULL;
+	pr_debug("__put_ioctx: freeing %p\n", ctx);
+	ioctx_free_reqs(ctx);
+	kmem_cache_free(kioctx_cachep, ctx);
+
+	br_write_lock(BR_AIO_REQ_LOCK);
+	aio_nr -= nr_reqs;
+	br_write_unlock(BR_AIO_REQ_LOCK);
+}
+
+/* aio_get_req
+ *	Allocate a slot for an aio request.  Increments the users count
+ * of the kioctx so that the kioctx stays around until all requests are
+ * complete.  Returns -EAGAIN if no requests are free.
+ */
+static inline struct kiocb *__aio_get_req(struct kioctx *ctx)
+{
+	struct kiocb *req = NULL;
+
+	/* Use cmpxchg instead of spin_lock? */
+	spin_lock_irq(&ctx->lock);
+	if (!list_empty(&ctx->free_reqs) &&
+	    (ctx->reqs_active < aio_ring_avail(&ctx->ring_info))) {
+		req = list_kiocb(ctx->free_reqs.next);
+		list_del(&req->list);
+		list_add(&req->list, &ctx->active_reqs);
+		ctx->reqs_active++;
+		req->user_obj = NULL;
+		get_ioctx(ctx);
+
+		if (req->ctx)
+			BUG();
+		req->ctx = ctx;
+		if (req->users)
+			BUG();
+		req->users = 1;
+	}
+	spin_unlock_irq(&ctx->lock);
+
+	return req;
+}
+
+static inline struct kiocb *aio_get_req(struct kioctx *ctx)
+{
+	struct kiocb *req;
+	/* Handle a potential starvation case -- should be exceedingly rare as 
+	 * requests will be stuck on fput_head only if the aio_fput_routine is 
+	 * delayed and the requests were the last user of the struct file.
+	 */
+	req = __aio_get_req(ctx);
+	if (unlikely(NULL == req)) {
+		aio_fput_routine(NULL);
+		req = __aio_get_req(ctx);
+	}
+	return req;
+}
+
+static inline void really_put_req(struct kioctx *ctx, struct kiocb *req)
+{
+	req->ctx = NULL;
+	req->filp = NULL;
+	req->user_obj = NULL;
+	ctx->reqs_active--;
+	list_add(&req->list, &ctx->free_reqs);
+
+	if (!ctx->reqs_active && ctx->dead)
+		wake_up(&ctx->wait);
+}
+
+static void aio_fput_routine(void *data)
+{
+	spin_lock_irq(&fput_lock);
+	while (likely(!list_empty(&fput_head))) {
+		struct kiocb *req = list_kiocb(fput_head.next);
+		struct kioctx *ctx = req->ctx;
+
+		list_del(&req->list);
+		spin_unlock_irq(&fput_lock);
+
+		/* Complete the fput */
+		__fput(req->filp);
+
+		/* Link the iocb into the context's free list */
+		spin_lock_irq(&ctx->lock);
+		really_put_req(ctx, req);
+		spin_unlock_irq(&ctx->lock);
+
+		put_ioctx(ctx);
+		spin_lock_irq(&fput_lock);
+	}
+	spin_unlock_irq(&fput_lock);
+}
+
+/* __aio_put_req
+ *	Returns true if this put was the last user of the request.
+ */
+static int __aio_put_req(struct kioctx *ctx, struct kiocb *req)
+{
+	dprintk(KERN_DEBUG "aio_put(%p): f_count=%d\n",
+		req, atomic_read(&req->filp->f_count));
+
+	req->users --;
+	if (unlikely(req->users < 0))
+		BUG();
+	if (req->users)
+		return 0;
+	list_del(&req->list);		/* remove from active_reqs */
+	req->cancel = NULL;
+
+	/* Must be done under the lock to serialise against cancellation.
+	 * Call this aio_fput as it duplicates fput via the fput_tqueue.
+	 */
+	if (unlikely(atomic_dec_and_test(&req->filp->f_count))) {
+		get_ioctx(ctx);
+		spin_lock(&fput_lock);
+		list_add(&req->list, &fput_head);
+		spin_unlock(&fput_lock);
+		schedule_task(&fput_tqueue);
+	} else
+		really_put_req(ctx, req);
+	return 1;
+}
+
+/* aio_put_req
+ *	Returns true if this put was the last user of the kiocb,
+ *	false if the request is still in use.
+ */
+int aio_put_req(struct kiocb *req)
+{
+	struct kioctx *ctx = req->ctx;
+	int ret;
+	spin_lock_irq(&ctx->lock);
+	ret = __aio_put_req(ctx, req);
+	spin_unlock_irq(&ctx->lock);
+	if (ret)
+		put_ioctx(ctx);
+	return ret;
+}
+
+/*	Lookup an ioctx id.  ioctx_list is lockless for reads.
+ *	FIXME: this is O(n) and is only suitable for development.
+ */
+static inline struct kioctx *lookup_ioctx(unsigned long ctx_id)
+{
+	struct kioctx *ioctx;
+	struct mm_struct *mm;
+
+	br_read_lock(BR_AIO_REQ_LOCK);
+	mm = current->mm;
+	for (ioctx = mm->ioctx_list; ioctx; ioctx = ioctx->next)
+		if (likely(ioctx->user_id == ctx_id && !ioctx->dead)) {
+			get_ioctx(ioctx);
+			break;
+		}
+	br_read_unlock(BR_AIO_REQ_LOCK);
+
+	return ioctx;
+}
+
+/* aio_complete
+ *	Called when the io request on the given iocb is complete.
+ *	Returns true if this is the last user of the request.  The 
+ *	only other user of the request can be the cancellation code.
+ */
+int aio_complete(struct kiocb *iocb, long res, long res2)
+{
+	struct kioctx	*ctx = iocb->ctx;
+	struct aio_ring_info	*info = &ctx->ring_info;
+	struct aio_ring	*ring = info->ring;
+	struct io_event	*event;
+	unsigned long	flags;
+	unsigned long	tail;
+	int		ret;
+
+	/* add a completion event to the ring buffer.
+	 * must be done holding ctx->lock to prevent
+	 * other code from messing with the tail
+	 * pointer since we might be called from irq
+	 * context.
+	 */
+	spin_lock_irqsave(&ctx->lock, flags);
+
+	tail = info->tail;
+	event = aio_ring_event(info, tail);
+	tail = (tail + 1) % info->nr;
+
+	event->obj = (u64)(unsigned long)iocb->user_obj;
+	event->data = iocb->user_data;
+	event->res = res;
+	event->res2 = res2;
+
+	dprintk("aio_complete: %p[%lu]: %p: %p %Lx %lx %lx\n",
+		ctx, tail, iocb, iocb->user_obj, iocb->user_data, res, res2);
+
+	/* after flagging the request as done, we
+	 * must never even look at it again
+	 */
+	barrier();
+
+	info->tail = tail;
+	ring->tail = tail;
+
+	wmb();
+	if (!ring->woke)
+		ring->woke = 1;
+
+	pr_debug("added to ring %p at [%lu]\n", iocb, tail);
+
+	/* everything turned out well, dispose of the aiocb. */
+	ret = __aio_put_req(ctx, iocb);
+
+	spin_unlock_irqrestore(&ctx->lock, flags);
+
+	wake_up(&ctx->wait);
+	if (ret)
+		put_ioctx(ctx);
+
+	return ret;
+}
+
+/* aio_read_evt
+ *	Pull an event off of the ioctx's event ring.  Returns the number of 
+ *	events fetched (0 or 1 ;-)
+ *	FIXME: make this use cmpxchg.
+ *	TODO: make the ringbuffer user mmap()able (requires FIXME).
+ */
+static int aio_read_evt(struct kioctx *ioctx, struct io_event *ent)
+{
+	struct aio_ring_info *info = &ioctx->ring_info;
+	struct aio_ring *ring = info->ring;
+	unsigned long head;
+	int ret = 0;
+
+	dprintk("in aio_read_evt h%lu t%lu m%lu\n",
+		 (unsigned long)ring->head, (unsigned long)ring->tail,
+		 (unsigned long)ring->nr);
+	barrier();
+	if (ring->head == ring->tail)
+		goto out;
+
+	spin_lock(&info->ring_lock);
+
+	head = ring->head % info->nr;
+	if (head != ring->tail) {
+		struct io_event *evp = aio_ring_event(info, head);
+		*ent = *evp;
+		head = (head + 1) % info->nr;
+		barrier();
+		ring->head = head;
+		ret = 1;
+	}
+	spin_unlock(&info->ring_lock);
+
+out:
+	dprintk("leaving aio_read_evt: %d  h%lu t%lu\n", ret,
+		 (unsigned long)ring->head, (unsigned long)ring->tail);
+	return ret;
+}
+
+struct timeout {
+	struct timer_list	timer;
+	int			timed_out;
+	wait_queue_head_t	wait;
+};
+
+static void timeout_func(unsigned long data)
+{
+	struct timeout *to = (struct timeout *)data;
+
+	to->timed_out = 1;
+	wake_up(&to->wait);
+}
+
+static inline void init_timeout(struct timeout *to)
+{
+	init_timer(&to->timer);
+	to->timer.data = (unsigned long)to;
+	to->timer.function = timeout_func;
+	to->timed_out = 0;
+	init_waitqueue_head(&to->wait);
+}
+
+static inline void set_timeout(struct timeout *to, const struct timespec *ts)
+{
+	unsigned long how_long;
+
+	if (!ts->tv_sec && !ts->tv_nsec) {
+		to->timed_out = 1;
+		return;
+	}
+
+	how_long = ts->tv_sec * HZ;
+#define HZ_NS (1000000000 / HZ)
+	how_long += (ts->tv_nsec + HZ_NS - 1) / HZ_NS;
+	
+	to->timer.expires = jiffies + how_long;
+	add_timer(&to->timer);
+}
+
+static inline void clear_timeout(struct timeout *to)
+{
+	del_timer_sync(&to->timer);
+}
+
+static int read_events(struct kioctx *ctx, int nr, struct io_event *event,
+			const struct timespec *timeout)
+{
+	struct task_struct	*tsk = current;
+	DECLARE_WAITQUEUE(wait, tsk);
+	DECLARE_WAITQUEUE(to_wait, tsk);
+	int			ret;
+	int			i = 0;
+	struct io_event		ent;
+	struct timeout		to;
+
+	/* needed to zero any padding within an entry (there shouldn't be 
+	 * any, but C is fun!
+	 */
+	memset(&ent, 0, sizeof(ent));
+	ret = 0;
+
+	while (likely(i < nr)) {
+		ret = aio_read_evt(ctx, &ent);
+		if (unlikely(ret <= 0))
+			break;
+
+		dprintk("read event: %Lx %Lx %Lx %Lx\n",
+			ent.data, ent.obj, ent.res, ent.res2);
+
+		/* FIXME: split checks in two */
+		ret = -EFAULT;
+		if (unlikely(copy_to_user(event, &ent, sizeof(ent)))) {
+			dprintk("aio: lost an event due to EFAULT.\n");
+			break;
+		}
+		ret = 0;
+
+		/* Good, event copied to userland, update counts. */
+		event ++;
+		i ++;
+	}
+
+	if (i)
+		return i;
+	if (ret)
+		return ret;
+
+	/* End fast path */
+
+	init_timeout(&to);
+	if (timeout) {
+		struct timespec	ts;
+		ret = -EFAULT;
+		if (unlikely(copy_from_user(&ts, timeout, sizeof(ts))))
+			goto out;
+
+		set_timeout(&to, &ts);
+		if (to.timed_out)
+			timeout = 0;
+	}
+
+	while (likely(i < nr)) {
+		add_wait_queue_exclusive_lifo(&ctx->wait, &wait);
+		add_wait_queue(&to.wait, &to_wait);
+		do {
+			set_task_state(tsk, TASK_INTERRUPTIBLE);
+
+			ret = aio_read_evt(ctx, &ent);
+			if (ret)
+				break;
+			if (i)
+				break;
+			ret = 0;
+			if (to.timed_out)	/* Only check after read evt */
+				break;
+			schedule();
+			if (signal_pending(tsk)) {
+				ret = -EINTR;
+				break;
+			}
+			/*ret = aio_read_evt(ctx, &ent);*/
+		} while (1) ;
+
+		set_task_state(tsk, TASK_RUNNING);
+		remove_wait_queue(&ctx->wait, &wait);
+		remove_wait_queue(&to.wait, &to_wait);
+
+		if (unlikely(ret <= 0))
+			break;
+
+		ret = -EFAULT;
+		if (unlikely(copy_to_user(event, &ent, sizeof(ent)))) {
+			dprintk("aio: lost an event due to EFAULT.\n");
+			break;
+		}
+
+		/* Good, event copied to userland, update counts. */
+		event ++;
+		i ++;
+	}
+
+	if (timeout)
+		clear_timeout(&to);
+out:
+	return i ? i : ret;
+}
+
+/* Take an ioctx and remove it from the list of ioctx's.  Protects 
+ * against races with itself via ->dead.
+ */
+static void io_destroy(struct kioctx *ioctx)
+{
+	struct kioctx **tmp;
+	int was_dead;
+
+	/* delete the entry from the list is someone else hasn't already */
+	br_write_lock(BR_AIO_REQ_LOCK);
+	was_dead = ioctx->dead;
+	ioctx->dead = 1;
+	for (tmp = &current->mm->ioctx_list; *tmp && *tmp != ioctx;
+	     tmp = &(*tmp)->next)
+		;
+	if (*tmp)
+		*tmp = ioctx->next;
+	br_write_unlock(BR_AIO_REQ_LOCK);
+
+	dprintk("aio_release(%p)\n", ioctx);
+	if (likely(!was_dead))
+		put_ioctx(ioctx);	/* twice for the list */
+
+	aio_cancel_all(ioctx);
+	wait_for_all_aios(ioctx);
+	put_ioctx(ioctx);	/* once for the lookup */
+}
+
+asmlinkage long sys_io_setup(unsigned nr_reqs, aio_context_t *ctxp)
+{
+	struct kioctx *ioctx = NULL;
+	unsigned long ctx;
+	long ret;
+
+	ret = get_user(ctx, ctxp);
+	if (unlikely(ret))
+		goto out;
+
+	ret = -EINVAL;
+	if (unlikely(ctx || !nr_reqs || (int)nr_reqs < 0)) {
+		pr_debug("EINVAL: io_setup: ctx or nr_reqs > max\n");
+		goto out;
+	}
+
+	ret = -EAGAIN;
+	if (unlikely(nr_reqs > max_aio_reqs))
+		goto out;
+
+	ioctx = ioctx_alloc(nr_reqs);
+	ret = PTR_ERR(ioctx);
+	if (!IS_ERR(ioctx)) {
+		ret = put_user(ioctx->user_id, ctxp);
+		if (!ret)
+			return 0;
+		io_destroy(ioctx);
+	}
+
+out:
+	return ret;
+}
+
+/* aio_release
+ *	Release the kioctx associated with the userspace handle.
+ */
+asmlinkage long sys_io_destroy(aio_context_t ctx)
+{
+	struct kioctx *ioctx = lookup_ioctx(ctx);
+	if (likely(NULL != ioctx)) {
+		io_destroy(ioctx);
+		return 0;
+	}
+	pr_debug("EINVAL: io_destroy: invalid context id\n");
+	return -EINVAL;
+}
+
+int generic_aio_poll(struct file *file, struct kiocb *req, struct iocb iocb)
+{
+	unsigned events = iocb.aio_buf;
+
+	/* Did the user set any bits they weren't supposed to? (The 
+	 * assignment above is actually a narrowing cast.)
+	 */
+	if (unlikely(events != iocb.aio_buf))
+		return -EINVAL;
+	
+	return async_poll(req, events);
+}
+
+/* sys_io_submit
+ *	Copy an aiocb from userspace into kernel space, then convert it to
+ *	a kiocb, submit and repeat until done.  Error codes on copy/submit
+ *	only get returned for the first aiocb copied as otherwise the size
+ *	of aiocbs copied is returned (standard write semantics).
+ */
+asmlinkage long sys_io_submit(aio_context_t ctx_id, long nr, struct iocb **iocbpp)
+{
+	struct kioctx *ctx;
+	long ret = 0;
+	int i;
+
+	if (unlikely(nr < 0))
+		return -EINVAL;
+
+	ctx = lookup_ioctx(ctx_id);
+	if (unlikely(!ctx)) {
+		pr_debug("EINVAL: io_submit: invalid context id\n");
+		return -EINVAL;
+	}
+
+	for (i=0; i<nr; i++) {
+		int (*op)(struct file *, struct kiocb *, struct iocb);
+		struct iocb *iocbp, tmp;
+		struct kiocb *req;
+		struct file *file;
+
+		ret = get_user(iocbp, iocbpp + i);
+		if (unlikely(ret))
+			break;
+
+		ret = -EFAULT;
+		if (copy_from_user(&tmp, iocbp, sizeof(tmp)))
+			break;
+
+		ret = -EINVAL;
+		/* enforce forwards compatibility on users */
+		if (unlikely(tmp.aio_reserved1 || tmp.aio_reserved2 || tmp.aio_reserved3)) {
+			pr_debug("EINVAL: io_submit: reserve field set\n");
+			break;
+		}
+
+		/* prevent overflows */
+		if (unlikely(
+		    (tmp.aio_buf != (unsigned long)tmp.aio_buf) ||
+		    (tmp.aio_nbytes != (size_t)tmp.aio_nbytes) ||
+		    ((ssize_t)tmp.aio_nbytes < 0)
+		   )) {
+			pr_debug("EINVAL: io_submit: overflow check\n");
+			break;
+		}
+
+		file = fget(tmp.aio_fildes);
+		ret = -EBADF;
+		if (unlikely(!file))
+			break;
+
+		req = aio_get_req(ctx);
+		ret = -EAGAIN;
+		if (unlikely(!req)) {
+			fput(file);
+			break;
+		}
+
+		req->filp = file;
+		tmp.aio_key = req->key;
+		ret = put_user(tmp.aio_key, &iocbp->aio_key);
+		if (unlikely(ret)) {
+			dprintk("EFAULT: aio_key\n");
+			goto out_put_req;
+		}
+
+		req->user_obj = iocbp;
+		req->user_data = tmp.aio_data;
+		req->buf = tmp.aio_buf;
+		req->pos = tmp.aio_offset;
+		req->size = tmp.aio_nbytes;
+		req->nr_transferred = 0;
+
+		switch (tmp.aio_lio_opcode) {
+		case IOCB_CMD_PREAD:
+			op = file->f_op->aio_read;
+			ret = -EBADF;
+			if (!(file->f_mode & FMODE_READ))
+				goto out_put_req;
+			break;
+		case IOCB_CMD_PREADX:
+			op = file->f_op->aio_readx;
+			ret = -EBADF;
+			if (!(file->f_mode & FMODE_READ))
+				goto out_put_req;
+			break;
+		case IOCB_CMD_PWRITE:
+			op = file->f_op->aio_write;
+			ret = -EBADF;
+			if (!(file->f_mode & FMODE_WRITE))
+				goto out_put_req;
+			break;
+		case IOCB_CMD_FSYNC:
+			op = file->f_op->aio_fsync;
+			break;
+		case IOCB_CMD_POLL:
+			//op = generic_aio_poll;
+			break;
+		default:
+			op = NULL;
+			break;
+		}
+
+		ret = -EINVAL;
+		if (unlikely(!op)) {
+			pr_debug("EINVAL: io_submit: no operation provided\n");
+			goto out_put_req;
+		}
+
+		ret = op(file, req, tmp);
+		if (likely(!ret))
+			continue;
+
+		pr_debug("io_submit: op returned %ld\n", ret);
+		aio_complete(req, ret, 0);
+		ret = 0;	/* A completion event was sent, so 
+				 * submit is a success. */
+		continue;
+
+	out_put_req:
+		aio_put_req(req);
+		break;
+	}
+
+	put_ioctx(ctx);
+	run_task_queue(&tq_disk);
+	return i ? i : ret;
+}
+
+static void generic_aio_next_chunk(void *_iocb)
+{
+	int (*kvec_op)(struct file *, kvec_cb_t, size_t, loff_t);
+	struct kiocb *iocb = _iocb;
+	int rw = iocb->this_size;
+	unsigned long buf = iocb->buf;
+	kvec_cb_t cb;
+	ssize_t res;
+
+	iocb->this_size = iocb->size - iocb->nr_transferred;
+	if (iocb->this_size > aio_max_size)
+		iocb->this_size = aio_max_size;
+
+	buf += iocb->nr_transferred;
+	cb.vec = mm_map_user_kvec(iocb->ctx->mm, rw, buf, iocb->this_size);
+	cb.fn = (rw == READ) ? generic_aio_complete_read
+			     : generic_aio_complete_write;
+	cb.data = iocb;
+
+	dprintk("generic_aio_rw: cb.vec=%p\n", cb.vec);
+	if (unlikely(IS_ERR(cb.vec)))
+		goto done;
+
+	kvec_op = (rw == READ) ? iocb->filp->f_op->kvec_read
+			       : iocb->filp->f_op->kvec_write;
+	dprintk("submit: %d %d %d\n", iocb->this_size, iocb->nr_transferred, iocb->size);
+	res = kvec_op(iocb->filp, cb, iocb->this_size,
+		      iocb->pos + iocb->nr_transferred);
+	if (!res) {
+		dprintk("submit okay\n");
+		return;
+	}
+	dprintk("submit failed: %d\n", res);
+	
+	cb.fn(cb.data, cb.vec, res);
+	return;
+
+done:
+	if (!iocb->nr_transferred)
+		BUG();
+	aio_complete(iocb, iocb->nr_transferred, 0);
+}
+
+static void generic_aio_complete_rw(int rw, void *_iocb, struct kvec *vec, ssize_t res)
+{
+	struct kiocb *iocb = _iocb;
+
+	unmap_kvec(vec, rw == READ);
+	free_kvec(vec);
+
+	if (res > 0)
+		iocb->nr_transferred += res;
+
+	/* Was this chunk successful?  Is there more left to transfer? */
+	if (res == iocb->this_size && iocb->nr_transferred < iocb->size) {
+		/* We may be in irq context, so queue processing in 
+		 * process context.
+		 */
+		iocb->this_size = rw;
+		INIT_TQUEUE(&iocb->u.tq, generic_aio_next_chunk, iocb);
+		schedule_task(&iocb->u.tq);
+		return;
+	}
+
+	aio_complete(iocb, iocb->nr_transferred ? iocb->nr_transferred : res,
+		     0);
+}
+
+static void generic_aio_complete_read(void *_iocb, struct kvec *vec, ssize_t res)
+{
+	generic_aio_complete_rw(READ, _iocb, vec, res);
+}
+
+static void generic_aio_complete_write(void *_iocb, struct kvec *vec, ssize_t res)
+{
+	generic_aio_complete_rw(WRITE, _iocb, vec, res);
+}
+
+ssize_t generic_aio_rw(int rw, struct file *file, struct kiocb *req, struct iocb iocb, size_t min_size)
+{
+	int (*kvec_op)(struct file *, kvec_cb_t, size_t, loff_t);
+	unsigned long buf = iocb.aio_buf;
+	size_t size = iocb.aio_nbytes;
+	size_t	nr_read = 0;
+	loff_t pos = iocb.aio_offset;
+	kvec_cb_t cb;
+	ssize_t res;
+
+#if 0
+	if (likely(NULL != file->f_op->new_read)) {
+		nr_read = file->f_op->new_read(file, (void *)buf, size,
+					       &pos, F_ATOMIC);
+		dprintk("from new_read: nr_read: %ld\n", (long)nr_read);
+		if ((-EAGAIN == nr_read) || (-EWOULDBLOCKIO == nr_read))
+			nr_read = 0;
+		else if ((nr_read >= min_size) || (nr_read < 0)) {
+			dprintk("returning nr_read: %ld\n", (long)nr_read);
+			return nr_read;
+		}
+	}
+	dprintk("nr_read: %ld\n", (long)nr_read);
+#endif
+
+	req->nr_transferred = nr_read;
+	size -= nr_read;
+	if (size > aio_max_size)
+		/* We have to split up the request.  Pin the mm
+		 * struct for further use with map_user_kvec later.
+		 */
+		size = aio_max_size;
+	else
+		req->buf = 0;
+
+	req->this_size = size;
+
+	buf += nr_read;
+	cb.vec = map_user_kvec(rw, buf, size);
+	cb.fn = (rw == READ) ? generic_aio_complete_read
+			     : generic_aio_complete_write;
+	cb.data = req;
+
+	dprintk("generic_aio_rw: cb.vec=%p\n", cb.vec);
+	if (IS_ERR(cb.vec))
+		return nr_read ? nr_read : PTR_ERR(cb.vec);
+
+	kvec_op = (rw == READ) ? file->f_op->kvec_read : file->f_op->kvec_write;
+
+	res = kvec_op(file, cb, size, pos);
+	if (unlikely(res != 0)) {
+		/* If the first chunk was successful, we have to run
+		 * the callback to attempt the rest of the io.
+		 */
+		if (res == size && req->buf) {
+			cb.fn(cb.data, cb.vec, res);
+			return 0;
+		}
+
+		unmap_kvec(cb.vec, rw == READ);
+		free_kvec(cb.vec);
+		if (nr_read) {
+			if (res < 0)
+				res = 0;
+			res += nr_read;
+		}
+	}
+	return res;
+}
+
+ssize_t generic_file_aio_read(struct file *file, struct kiocb *req, struct iocb iocb)
+{
+	return generic_aio_rw(READ, file, req, iocb, iocb.aio_nbytes);  
+}
+
+ssize_t generic_sock_aio_read(struct file *file, struct kiocb *req, struct iocb iocb)
+{
+	return generic_aio_rw(READ, file, req, iocb, 1);	
+}
+
+ssize_t generic_aio_write(struct file *file, struct kiocb *req, struct iocb iocb, size_t min_size)
+{
+	return generic_aio_rw(WRITE, file, req, iocb, 1);
+#if 0
+	unsigned long buf = iocb.aio_buf;
+	size_t size = iocb.aio_nbytes;
+	loff_t pos = iocb.aio_offset;
+	ssize_t	nr_written = 0;
+	kvec_cb_t cb;
+	long res;
+#if 0
+	if (likely(NULL != file->f_op->new_write)) {
+		nr_written = file->f_op->new_write(file, (void *)buf, size,
+					       &pos, F_ATOMIC);
+		pr_debug("generic_aio_write: new_write: %ld\n", (long)nr_written);
+		if (-EAGAIN == nr_written)
+			nr_written = 0;
+		if ((nr_written >= min_size) || (nr_written < 0))
+			return nr_written;
+	}
+#endif
+
+	req->nr_transferred = nr_written;
+	size -= nr_written;
+	if (size > aio_max_size)
+		size = aio_max_size;
+	req->this_size = size;
+	buf += nr_written;
+	cb.vec = map_user_kvec(WRITE, buf, size);
+	cb.fn = generic_aio_complete_write;
+	cb.data = req;
+
+	if (IS_ERR(cb.vec)) {
+		pr_debug("generic_aio_write: map_user_kvec: %ld\n", PTR_ERR(cb.vec));
+		return nr_written ? nr_written : PTR_ERR(cb.vec);
+	}
+
+	res = file->f_op->kvec_write(file, cb, size, iocb.aio_offset);
+	pr_debug("generic_aio_write: kvec_write: %ld\n", res);
+	if (unlikely(res != 0)) {
+		unmap_kvec(cb.vec, 0);
+		free_kvec(cb.vec);
+		if (nr_written) {
+			if (res < 0)
+				res = 0;
+			res += nr_written;
+		}
+	}
+	return res;
+#endif
+}
+
+ssize_t generic_file_aio_write(struct file *file, struct kiocb *req, struct iocb iocb)
+{
+	return generic_aio_write(file, req, iocb, iocb.aio_nbytes);	
+}
+
+/* lookup_kiocb
+ *	Finds a given iocb for cancellation.
+ *	MUST be called with ctx->lock held.
+ */
+struct kiocb *lookup_kiocb(struct kioctx *ctx, struct iocb *iocb, u32 key)
+{
+	struct list_head *pos;
+	/* TODO: use a hash or array, this sucks. */
+	list_for_each(pos, &ctx->free_reqs) {
+		struct kiocb *kiocb = list_kiocb(pos);
+		if (kiocb->user_obj == iocb && kiocb->key == key)
+			return kiocb;
+	}
+	return NULL;
+}
+
+asmlinkage long sys_io_cancel(aio_context_t ctx_id, struct iocb *iocb)
+{
+	int (*cancel)(struct kiocb *iocb);
+	struct kioctx *ctx;
+	struct kiocb *kiocb;
+	u32 key;
+	int ret;
+
+	ret = get_user(key, &iocb->aio_key);
+	if (unlikely(ret))
+		return ret;
+
+	ctx = lookup_ioctx(ctx_id);
+	if (unlikely(!ctx))
+		return -EINVAL;
+
+	spin_lock_irq(&ctx->lock);
+	ret = -EAGAIN;
+	kiocb = lookup_kiocb(ctx, iocb, key);
+	if (kiocb && kiocb->cancel) {
+		cancel = kiocb->cancel;
+		kiocb->users ++;
+	} else
+		cancel = NULL;
+	spin_unlock_irq(&ctx->lock);
+
+	if (NULL != cancel) {
+		printk("calling cancel\n");
+		ret = cancel(kiocb);
+	} else
+		printk("iocb has no cancel operation\n");
+
+	put_ioctx(ctx);
+
+	return ret;
+}
+
+asmlinkage long sys_io_wait(aio_context_t ctx_id, struct iocb *iocb, const struct timespec *timeout)
+{
+#if 0	/* FIXME.  later. */
+	struct kioctx *ioctx;
+	long ret = -EINVAL;
+	unsigned key;
+	long obj = (long)iocb;
+
+	ioctx = lookup_ioctx(ctx_id);
+	if (!ioctx)
+		goto out;
+
+	ret = get_user(key, &iocb->aio_key);
+	if (ret)
+		goto out;
+
+	ret = __aio_complete(ioctx, key, obj, !!timeout);
+	put_ioctx(ioctx);
+
+out:
+	return ret;
+#endif
+	return -ENOSYS;
+}
+
+asmlinkage long sys_io_getevents_slow(aio_context_t ctx_id,
+				   long nr,
+				   struct io_event *events,
+				   const struct timespec *timeout)
+{
+	struct kioctx *ioctx = lookup_ioctx(ctx_id);
+	long ret = -EINVAL;
+
+	if (likely(NULL != ioctx)) {
+		ret = read_events(ioctx, nr, events, timeout);
+		put_ioctx(ioctx);
+	}
+
+	return ret;
+}
+
+/* vsys_io_getevents: runs in userspace to fetch what io events are 
+ * available.
+ */
+__attribute__((section(".vsyscall_text")))
+asmlinkage long vsys_io_getevents(aio_context_t ctx_id,
+				   long nr,
+				   struct io_event *events,
+				   const struct timespec *timeout)
+{
+#if 1
+	struct aio_ring	*ring = (struct aio_ring *)ctx_id;
+	long i=0;
+
+	while (i < nr) {
+		unsigned head;
+
+		head = ring->head;
+		if (head == ring->tail)
+			break;
+
+		*events++ = ring->io_events[head];
+		head = (head + 1) % ring->nr;
+		ring->head = head;
+		i++;
+	}
+
+	if (i)
+		return i;
+#endif
+	return vsys_io_getevents_slow(ctx_id, nr, events, timeout);
+}
+
+__initcall(aio_setup);
+
+add_dynamic_syscall(sys_io_setup);
+add_dynamic_syscall(sys_io_destroy);
+add_dynamic_syscall(sys_io_submit);
+add_dynamic_syscall(sys_io_cancel);
+add_dynamic_syscall(sys_io_wait);
+add_dynamic_syscall(sys_io_getevents_slow);
+EXPORT_SYMBOL_GPL(generic_file_kvec_read);
+EXPORT_SYMBOL_GPL(generic_file_aio_read);
+EXPORT_SYMBOL_GPL(generic_file_kvec_write);
+EXPORT_SYMBOL_GPL(generic_file_aio_write);
+EXPORT_SYMBOL_GPL(generic_file_new_read);
diff -urN v2.4.19-pre5/fs/buffer.c linux.diff/fs/buffer.c
--- v2.4.19-pre5/fs/buffer.c	Wed Apr  3 21:04:36 2002
+++ linux.diff/fs/buffer.c	Tue Apr  2 18:56:57 2002
@@ -3084,3 +3084,220 @@
 
 module_init(bdflush_init)
 
+/* async kio interface */
+struct brw_cb {
+	kvec_cb_t		cb;
+	atomic_t		io_count;
+	int			nr;
+	struct buffer_head	*bh[1];
+};
+
+static inline void brw_cb_put(struct brw_cb *brw_cb)
+{
+	if (atomic_dec_and_test(&brw_cb->io_count)) {
+		ssize_t res = 0, err = 0;
+		int nr;
+
+		/* Walk the buffer heads associated with this kiobuf
+		 * checking for errors and freeing them as we go.
+		 */
+		for (nr=0; nr < brw_cb->nr; nr++) {
+			struct buffer_head *bh = brw_cb->bh[nr];
+			if (!err && buffer_uptodate(bh))
+				res += bh->b_size;
+			else
+				err = -EIO;
+			kmem_cache_free(bh_cachep, bh);
+		}
+
+		if (!res)
+			res = err;
+
+		brw_cb->cb.fn(brw_cb->cb.data, brw_cb->cb.vec, res);
+
+		kfree(brw_cb);
+	}
+}
+
+/*
+ * IO completion routine for a buffer_head being used for kiobuf IO: we
+ * can't dispatch the kiobuf callback until io_count reaches 0.  
+ */
+
+static void end_buffer_io_kiobuf_async(struct buffer_head *bh, int uptodate)
+{
+	struct brw_cb *brw_cb;
+	
+	mark_buffer_uptodate(bh, uptodate);
+
+	brw_cb = bh->b_private;
+	unlock_buffer(bh);
+
+	brw_cb_put(brw_cb);
+}
+
+
+/*
+ * Start I/O on a physical range of kernel memory, defined by a vector
+ * of kiobuf structs (much like a user-space iovec list).
+ *
+ * The kiobuf must already be locked for IO.  IO is submitted
+ * asynchronously: you need to check page->locked, page->uptodate, and
+ * maybe wait on page->wait.
+ *
+ * It is up to the caller to make sure that there are enough blocks
+ * passed in to completely map the iobufs to disk.
+ */
+
+int brw_kvec_async(int rw, kvec_cb_t cb, kdev_t dev, unsigned blocks, unsigned long blknr, int sector_shift)
+{
+	struct kvec	*vec = cb.vec;
+	struct kveclet	*veclet;
+	int		err;
+	int		length;
+	unsigned	sector_size = 1 << sector_shift;
+	int		i;
+
+	struct brw_cb	*brw_cb;
+
+	if (!vec->nr)
+		BUG();
+
+	/* 
+	 * First, do some alignment and validity checks 
+	 */
+	length = 0;
+	for (veclet=vec->veclet, i=0; i < vec->nr; i++,veclet++) {
+		length += veclet->length;
+		if ((veclet->offset & (sector_size-1)) ||
+		    (veclet->length & (sector_size-1))) {
+			printk("brw_kiovec_async: tuple[%d]->offset=0x%x length=0x%x sector_size: 0x%x\n", i, veclet->offset, veclet->length, sector_size);
+			return -EINVAL;
+		}
+	}
+
+	if (length < (blocks << sector_shift))
+		BUG();
+
+	/* 
+	 * OK to walk down the iovec doing page IO on each page we find. 
+	 */
+	err = 0;
+
+	if (!blocks) {
+		printk("brw_kiovec_async: !i\n");
+		return -EINVAL;
+	}
+
+	/* FIXME: tie into userbeans here */
+	brw_cb = kmalloc(sizeof(*brw_cb) + (blocks * sizeof(struct buffer_head *)), GFP_KERNEL);
+	if (!brw_cb)
+		return -ENOMEM;
+
+	brw_cb->cb = cb;
+	brw_cb->nr = 0;
+
+	/* This is ugly.  FIXME. */
+	for (i=0, veclet=vec->veclet; i<vec->nr; i++,veclet++) {
+		struct page *page = veclet->page;
+		unsigned offset = veclet->offset;
+		unsigned length = veclet->length;
+
+		if (!page)
+			BUG();
+
+		while (length > 0) {
+			struct buffer_head *tmp;
+			tmp = kmem_cache_alloc(bh_cachep, GFP_NOIO);
+			err = -ENOMEM;
+			if (!tmp)
+				goto error;
+
+			tmp->b_dev = B_FREE;
+			tmp->b_size = sector_size;
+			set_bh_page(tmp, page, offset);
+			tmp->b_this_page = tmp;
+
+			init_buffer(tmp, end_buffer_io_kiobuf_async, NULL);
+			tmp->b_dev = dev;
+			tmp->b_blocknr = blknr++;
+			tmp->b_state = (1 << BH_Mapped) | (1 << BH_Lock)
+					| (1 << BH_Req);
+			tmp->b_private = brw_cb;
+
+			if (rw == WRITE) {
+				set_bit(BH_Uptodate, &tmp->b_state);
+				clear_bit(BH_Dirty, &tmp->b_state);
+			}
+
+			brw_cb->bh[brw_cb->nr++] = tmp;
+			length -= sector_size;
+			offset += sector_size;
+
+			if (offset >= PAGE_SIZE) {
+				offset = 0;
+				break;
+			}
+
+			if (brw_cb->nr >= blocks)
+				goto submit;
+		} /* End of block loop */
+	} /* End of page loop */		
+
+submit:
+	atomic_set(&brw_cb->io_count, brw_cb->nr+1);
+	/* okay, we've setup all our io requests, now fire them off! */
+	for (i=0; i<brw_cb->nr; i++) 
+		submit_bh(rw, brw_cb->bh[i]);
+	brw_cb_put(brw_cb);
+
+	return 0;
+
+error:
+	/* Walk brw_cb_table freeing all the goop associated with each kiobuf */
+	if (brw_cb) {
+		/* We got an error allocating the bh'es.  Just free the current
+		   buffer_heads and exit. */
+		for (i=0; i<brw_cb->nr; i++)
+			kmem_cache_free(bh_cachep, brw_cb->bh[i]);
+		kfree(brw_cb);
+	}
+
+	return err;
+}
+#if 0
+int brw_kiovec(int rw, int nr, struct kiobuf *iovec[],
+		kdev_t dev, int nr_blocks, unsigned long b[], int sector_size)
+{
+	int i;
+	int transferred = 0;
+	int err = 0;
+
+	if (!nr)
+		return 0;
+
+	/* queue up and trigger the io */
+	err = brw_kiovec_async(rw, nr, iovec, dev, nr_blocks, b, sector_size);
+	if (err)
+		goto out;
+
+	/* wait on the last iovec first -- it's more likely to finish last */
+	for (i=nr; --i >= 0; )
+		kiobuf_wait_for_io(iovec[i]);
+
+	run_task_queue(&tq_disk);
+
+	/* okay, how much data actually got through? */
+	for (i=0; i<nr; i++) {
+		if (iovec[i]->errno) {
+			if (!err)
+				err = iovec[i]->errno;
+			break;
+		}
+		transferred += iovec[i]->length;
+	}
+
+out:
+	return transferred ? transferred : err;
+}
+#endif
diff -urN v2.4.19-pre5/fs/exec.c linux.diff/fs/exec.c
--- v2.4.19-pre5/fs/exec.c	Wed Apr  3 21:04:36 2002
+++ linux.diff/fs/exec.c	Mon Apr 29 15:54:22 2002
@@ -397,6 +397,7 @@
 	old_mm = current->mm;
 	if (old_mm && atomic_read(&old_mm->mm_users) == 1) {
 		mm_release();
+		exit_aio(old_mm);
 		exit_mmap(old_mm);
 		return 0;
 	}
diff -urN v2.4.19-pre5/fs/ext2/file.c linux.diff/fs/ext2/file.c
--- v2.4.19-pre5/fs/ext2/file.c	Thu Nov  1 16:40:02 2001
+++ linux.diff/fs/ext2/file.c	Sun Apr  7 18:47:48 2002
@@ -40,6 +40,8 @@
  */
 struct file_operations ext2_file_operations = {
 	llseek:		generic_file_llseek,
+	kvec_read:	generic_file_kvec_read,
+	kvec_write:	generic_file_kvec_write,
 	read:		generic_file_read,
 	write:		generic_file_write,
 	ioctl:		ext2_ioctl,
@@ -47,6 +49,8 @@
 	open:		generic_file_open,
 	release:	ext2_release_file,
 	fsync:		ext2_sync_file,
+	aio_read:	generic_file_aio_read,
+	aio_write:	generic_file_aio_write,
 };
 
 struct inode_operations ext2_file_inode_operations = {
diff -urN v2.4.19-pre5/fs/ext3/file.c linux.diff/fs/ext3/file.c
--- v2.4.19-pre5/fs/ext3/file.c	Mon Nov 26 23:43:08 2001
+++ linux.diff/fs/ext3/file.c	Sun Apr  7 18:47:59 2002
@@ -78,6 +78,8 @@
 
 struct file_operations ext3_file_operations = {
 	llseek:		generic_file_llseek,	/* BKL held */
+	kvec_read:	generic_file_kvec_read,
+	kvec_write:	generic_file_kvec_write,	/* FIXME: attributes */
 	read:		generic_file_read,	/* BKL not held.  Don't need */
 	write:		ext3_file_write,	/* BKL not held.  Don't need */
 	ioctl:		ext3_ioctl,		/* BKL held */
@@ -85,6 +87,8 @@
 	open:		ext3_open_file,		/* BKL not held.  Don't need */
 	release:	ext3_release_file,	/* BKL not held.  Don't need */
 	fsync:		ext3_sync_file,		/* BKL held */
+	aio_read:	generic_file_aio_read,
+	aio_write:	generic_file_aio_write,
 };
 
 struct inode_operations ext3_file_inode_operations = {
diff -urN v2.4.19-pre5/fs/fcblist.c linux.diff/fs/fcblist.c
--- v2.4.19-pre5/fs/fcblist.c	Wed Dec 31 19:00:00 1969
+++ linux.diff/fs/fcblist.c	Tue Apr 30 17:29:31 2002
@@ -0,0 +1,130 @@
+/*
+ *  linux/fs/fcblist.c
+ *
+ *  Copyright (C) 2001, Davide Libenzi <davidel@xmailserver.org>
+ *
+ *  Handle file callbacks
+ */
+
+#include <linux/config.h>
+#include <linux/module.h>
+#include <linux/fs.h>
+#include <linux/mm.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/vmalloc.h>
+#include <linux/poll.h>
+#include <asm/bitops.h>
+#include <linux/fcblist.h>
+
+
+long ion_band_table[NSIGPOLL] = {
+	ION_IN,		/* POLL_IN */
+	ION_OUT,	/* POLL_OUT */
+	ION_IN,		/* POLL_MSG */
+	ION_ERR,	/* POLL_ERR */
+	0,			/* POLL_PRI */
+	ION_HUP		/* POLL_HUP */
+};
+EXPORT_SYMBOL(ion_band_table);
+
+long poll_band_table[NSIGPOLL] = {
+	POLLIN | POLLRDNORM,			/* POLL_IN */
+	POLLOUT | POLLWRNORM | POLLWRBAND,	/* POLL_OUT */
+	POLLIN | POLLRDNORM | POLLMSG,		/* POLL_MSG */
+	POLLERR,				/* POLL_ERR */
+	POLLPRI | POLLRDBAND,			/* POLL_PRI */
+	POLLHUP | POLLERR			/* POLL_HUP */
+};
+EXPORT_SYMBOL(poll_band_table);
+
+
+void file_notify_event(struct file *filep, long *event)
+{
+	unsigned long flags;
+	struct list_head *lnk, *lsthead;
+
+	fcblist_read_lock(filep, flags);
+
+	lsthead = &filep->f_cblist;
+	list_for_each(lnk, lsthead) {
+		struct fcb_struct *fcbp = list_entry(lnk, struct fcb_struct, llink);
+
+		fcbp->cbproc(filep, fcbp->data, fcbp->local, event);
+	}
+
+	fcblist_read_unlock(filep, flags);
+}
+EXPORT_SYMBOL(file_notify_event);
+
+
+int file_notify_addcb(struct file *filep,
+		void (*cbproc)(struct file *, void *, unsigned long *, long *), void *data)
+{
+	unsigned long flags;
+	struct fcb_struct *fcbp;
+
+	if (!(fcbp = (struct fcb_struct *) kmalloc(sizeof(struct fcb_struct), GFP_KERNEL)))
+		return -ENOMEM;
+
+	memset(fcbp, 0, sizeof(struct fcb_struct));
+	fcbp->cbproc = cbproc;
+	fcbp->data = data;
+
+	fcblist_write_lock(filep, flags);
+	list_add_tail(&fcbp->llink, &filep->f_cblist);
+	fcblist_write_unlock(filep, flags);
+
+	return 0;
+}
+EXPORT_SYMBOL(file_notify_addcb);
+
+
+int file_notify_delcb(struct file *filep,
+		void (*cbproc)(struct file *, void *, unsigned long *, long *))
+{
+	unsigned long flags;
+	struct list_head *lnk, *lsthead;
+
+	fcblist_write_lock(filep, flags);
+
+	lsthead = &filep->f_cblist;
+	list_for_each(lnk, lsthead) {
+		struct fcb_struct *fcbp = list_entry(lnk, struct fcb_struct, llink);
+
+		if (fcbp->cbproc == cbproc) {
+			list_del(lnk);
+			fcblist_write_unlock(filep, flags);
+			kfree(fcbp);
+			return 0;
+		}
+	}
+
+	fcblist_write_unlock(filep, flags);
+
+	return -ENOENT;
+}
+EXPORT_SYMBOL(file_notify_delcb);
+
+
+void file_notify_cleanup(struct file *filep)
+{
+	unsigned long flags;
+	struct list_head *lnk, *lsthead;
+
+	fcblist_write_lock(filep, flags);
+
+	lsthead = &filep->f_cblist;
+	while ((lnk = list_first(lsthead))) {
+		struct fcb_struct *fcbp = list_entry(lnk, struct fcb_struct, llink);
+
+		list_del(lnk);
+		fcblist_write_unlock(filep, flags);
+		kfree(fcbp);
+		fcblist_write_lock(filep, flags);
+	}
+
+	fcblist_write_unlock(filep, flags);
+}
+EXPORT_SYMBOL(file_notify_cleanup);
+
diff -urN v2.4.19-pre5/fs/file_table.c linux.diff/fs/file_table.c
--- v2.4.19-pre5/fs/file_table.c	Mon Sep 24 02:16:04 2001
+++ linux.diff/fs/file_table.c	Tue Apr 30 17:29:31 2002
@@ -8,6 +8,7 @@
 #include <linux/string.h>
 #include <linux/slab.h>
 #include <linux/file.h>
+#include <linux/fcblist.h>
 #include <linux/init.h>
 #include <linux/module.h>
 #include <linux/smp_lock.h>
@@ -47,6 +48,7 @@
 		f->f_uid = current->fsuid;
 		f->f_gid = current->fsgid;
 		list_add(&f->f_list, &anon_list);
+		file_notify_init(f);
 		file_list_unlock();
 		return f;
 	}
@@ -91,6 +93,7 @@
 	filp->f_uid    = current->fsuid;
 	filp->f_gid    = current->fsgid;
 	filp->f_op     = dentry->d_inode->i_fop;
+	file_notify_init(filp);
 	if (filp->f_op->open)
 		return filp->f_op->open(dentry->d_inode, filp);
 	else
@@ -99,31 +102,36 @@
 
 void fput(struct file * file)
 {
+	if (atomic_dec_and_test(&file->f_count))
+		__fput(file);
+}
+
+void __fput(struct file * file)
+{
 	struct dentry * dentry = file->f_dentry;
 	struct vfsmount * mnt = file->f_vfsmnt;
 	struct inode * inode = dentry->d_inode;
 
-	if (atomic_dec_and_test(&file->f_count)) {
-		locks_remove_flock(file);
+	file_notify_cleanup(file);
+	locks_remove_flock(file);
 
-		if (file->f_iobuf)
-			free_kiovec(1, &file->f_iobuf);
+	if (file->f_iobuf)
+		free_kiovec(1, &file->f_iobuf);
 
-		if (file->f_op && file->f_op->release)
-			file->f_op->release(inode, file);
-		fops_put(file->f_op);
-		if (file->f_mode & FMODE_WRITE)
-			put_write_access(inode);
-		file_list_lock();
-		file->f_dentry = NULL;
-		file->f_vfsmnt = NULL;
-		list_del(&file->f_list);
-		list_add(&file->f_list, &free_list);
-		files_stat.nr_free_files++;
-		file_list_unlock();
-		dput(dentry);
-		mntput(mnt);
-	}
+	if (file->f_op && file->f_op->release)
+		file->f_op->release(inode, file);
+	fops_put(file->f_op);
+	if (file->f_mode & FMODE_WRITE)
+		put_write_access(inode);
+	file_list_lock();
+	file->f_dentry = NULL;
+	file->f_vfsmnt = NULL;
+	list_del(&file->f_list);
+	list_add(&file->f_list, &free_list);
+	files_stat.nr_free_files++;
+	file_list_unlock();
+	dput(dentry);
+	mntput(mnt);
 }
 
 struct file * fget(unsigned int fd)
diff -urN v2.4.19-pre5/fs/locks.c linux.diff/fs/locks.c
--- v2.4.19-pre5/fs/locks.c	Thu Nov  1 16:40:02 2001
+++ linux.diff/fs/locks.c	Mon Apr  8 16:46:00 2002
@@ -440,7 +440,7 @@
 	while (!list_empty(&blocker->fl_block)) {
 		struct file_lock *waiter = list_entry(blocker->fl_block.next, struct file_lock, fl_block);
 
-		if (wait) {
+		if (0) {
 			locks_notify_blocked(waiter);
 			/* Let the blocked process remove waiter from the
 			 * block list when it gets scheduled.
diff -urN v2.4.19-pre5/fs/nfs/file.c linux.diff/fs/nfs/file.c
--- v2.4.19-pre5/fs/nfs/file.c	Thu Mar  7 16:40:04 2002
+++ linux.diff/fs/nfs/file.c	Tue Apr  2 18:56:58 2002
@@ -39,9 +39,13 @@
 static ssize_t nfs_file_write(struct file *, const char *, size_t, loff_t *);
 static int  nfs_file_flush(struct file *);
 static int  nfs_fsync(struct file *, struct dentry *dentry, int datasync);
+static int nfs_kvec_write(struct file *file, kvec_cb_t cb, size_t count, loff_t pos);
+static int nfs_kvec_read(struct file *file, kvec_cb_t cb, size_t count, loff_t pos);
 
 struct file_operations nfs_file_operations = {
 	llseek:		generic_file_llseek,
+	kvec_read:	nfs_kvec_read,
+	kvec_write:	nfs_kvec_write,
 	read:		nfs_file_read,
 	write:		nfs_file_write,
 	mmap:		nfs_file_mmap,
@@ -50,6 +54,8 @@
 	release:	nfs_release,
 	fsync:		nfs_fsync,
 	lock:		nfs_lock,
+	aio_read:	generic_file_aio_read,
+	aio_write:	generic_file_aio_write,
 };
 
 struct inode_operations nfs_file_inode_operations = {
@@ -88,6 +94,28 @@
 	return status;
 }
 
+static int nfs_kvec_write(struct file *file, kvec_cb_t cb, size_t count, loff_t pos)
+{
+	struct dentry * dentry = file->f_dentry;
+	struct inode * inode = dentry->d_inode;
+	int ret;
+	ret = nfs_revalidate_inode(NFS_SERVER(inode), inode);
+	if (!ret)
+		return generic_file_kvec_write(file, cb, count, pos);
+	return ret;
+}
+
+static int nfs_kvec_read(struct file *file, kvec_cb_t cb, size_t count, loff_t pos)
+{
+	struct dentry * dentry = file->f_dentry;
+	struct inode * inode = dentry->d_inode;
+	int ret;
+	ret = nfs_revalidate_inode(NFS_SERVER(inode), inode);
+	if (!ret)
+		return generic_file_kvec_read(file, cb, count, pos);
+	return ret;
+}
+
 static ssize_t
 nfs_file_read(struct file * file, char * buf, size_t count, loff_t *ppos)
 {
diff -urN v2.4.19-pre5/fs/pipe.c linux.diff/fs/pipe.c
--- v2.4.19-pre5/fs/pipe.c	Wed Apr  3 21:04:37 2002
+++ linux.diff/fs/pipe.c	Tue May 14 13:18:21 2002
@@ -10,6 +10,7 @@
 #include <linux/slab.h>
 #include <linux/module.h>
 #include <linux/init.h>
+#include <linux/fcblist.h>
 
 #include <asm/uaccess.h>
 #include <asm/ioctls.h>
@@ -40,6 +41,7 @@
 pipe_read(struct file *filp, char *buf, size_t count, loff_t *ppos)
 {
 	struct inode *inode = filp->f_dentry->d_inode;
+	int pfull;
 	ssize_t size, read, ret;
 
 	/* Seeks are not allowed on pipes.  */
@@ -72,6 +74,7 @@
 			PIPE_WAITING_READERS(*inode)++;
 			pipe_wait(inode);
 			PIPE_WAITING_READERS(*inode)--;
+			pfull = PIPE_FULL(*inode);
 			ret = -ERESTARTSYS;
 			if (signal_pending(current))
 				goto out;
@@ -82,6 +85,8 @@
 				goto out;
 		}
 	}
+	else
+		pfull = PIPE_FULL(*inode);
 
 	/* Read what data is available.  */
 	ret = -EFAULT;
@@ -104,6 +109,9 @@
 		count -= chars;
 		buf += chars;
 	}
+	/* Send notification message */
+	if (pfull && !PIPE_FULL(*inode) && PIPE_WRITEFILE(*inode))
+		file_send_notify(PIPE_WRITEFILE(*inode), ION_OUT, POLLOUT | POLLWRNORM | POLLWRBAND);
 
 	/* Cache behaviour optimization */
 	if (!PIPE_LEN(*inode))
@@ -134,10 +142,63 @@
 	return ret;
 }
 
+static int pipe_kvec_read(struct file *filp, kvec_cb_t cb, size_t size, loff_t pos)
+{
+	
+}
+
+spinlock_t pipe_aio_lock = SPIN_LOCK_UNLOCKED;
+
+static ssize_t pipe_aio_read (struct file *file, struct kiocb *iocb, struct iocb uiocb)
+{
+	struct inode *inode = file->f_dentry->d_inode;
+	int queued = 0;
+
+	/* down_trylock == 0 if we obtained the semaphore -> if the 
+	 * semaphore was not acquired, we queue the read request.
+	 */
+	queued = down_trylock(PIPE_SEM(*inode));
+
+	spin_lock(&pipe_aio_lock);
+	if (queued || !list_empty(&inode->i_pipe->read_iocb_list)) {
+		printk("queueing aio pipe read\n");
+		list_add_tail(&iocb->u.list, &inode->i_pipe->read_iocb_list);
+		queued = 1;
+	}
+	spin_unlock(&pipe_aio_lock);
+
+	if (queued)
+		return 0;
+
+	/* Okay, we're the first read request.  Try reading data, otherwise 
+	 * fall back and queue.
+	 */
+	if (PIPE_EMPTY(*inode)) {
+		/* No writers?  EOF. */
+		if (!PIPE_WRITERS(*inode)) {
+			aio_complete(iocb, 0, 0);
+			goto out;
+		}
+
+		/* No data.  Oh well, queue it at the head. */
+		spin_lock(&pipe_aio_lock);
+		list_add(&iocb->u.list, &inode->i_pipe->read_iocb_list);
+		spin_unlock(&pipe_aio_lock);
+	}
+
+out:
+	up(PIPE_SEM(*inode));
+	/* FIXME: writes may have been queued */
+
+	return 0;
+}
+
+
 static ssize_t
 pipe_write(struct file *filp, const char *buf, size_t count, loff_t *ppos)
 {
 	struct inode *inode = filp->f_dentry->d_inode;
+	int pempty;
 	ssize_t free, written, ret;
 
 	/* Seeks are not allowed on pipes.  */
@@ -182,6 +243,7 @@
 	}
 
 	/* Copy into available space.  */
+	pempty = PIPE_EMPTY(*inode);
 	ret = -EFAULT;
 	while (count > 0) {
 		int space;
@@ -210,6 +272,9 @@
 			break;
 
 		do {
+			/* Send notification message */
+			if (pempty && !PIPE_EMPTY(*inode) && PIPE_READFILE(*inode))
+				file_send_notify(PIPE_READFILE(*inode), ION_IN, POLLIN | POLLRDNORM);
 			/*
 			 * Synchronous wake-up: it knows that this process
 			 * is going to give up this CPU, so it doesnt have
@@ -219,6 +284,7 @@
 			PIPE_WAITING_WRITERS(*inode)++;
 			pipe_wait(inode);
 			PIPE_WAITING_WRITERS(*inode)--;
+			pempty = PIPE_EMPTY(*inode);
 			if (signal_pending(current))
 				goto out;
 			if (!PIPE_READERS(*inode))
@@ -227,6 +293,9 @@
 		ret = -EFAULT;
 	}
 
+	/* Send notification message */
+	if (pempty && !PIPE_EMPTY(*inode) && PIPE_READFILE(*inode))
+		file_send_notify(PIPE_READFILE(*inode), ION_IN, POLLIN | POLLRDNORM);
 	/* Signal readers asynchronously that there is more data.  */
 	wake_up_interruptible(PIPE_WAIT(*inode));
 
@@ -299,9 +368,22 @@
 static int
 pipe_release(struct inode *inode, int decr, int decw)
 {
+	struct file *rdfile, *wrfile;
 	down(PIPE_SEM(*inode));
 	PIPE_READERS(*inode) -= decr;
 	PIPE_WRITERS(*inode) -= decw;
+	rdfile = PIPE_READFILE(*inode);
+	wrfile = PIPE_WRITEFILE(*inode);
+ 	if (decr && !PIPE_READERS(*inode)) {
+		PIPE_READFILE(*inode) = NULL;
+		if (wrfile)
+			file_send_notify(wrfile, ION_HUP, POLLHUP);
+	}
+	if (decw && !PIPE_WRITERS(*inode)) {
+		PIPE_WRITEFILE(*inode) = NULL;
+		if (rdfile)
+			file_send_notify(rdfile, ION_HUP, POLLHUP);
+	}
 	if (!PIPE_READERS(*inode) && !PIPE_WRITERS(*inode)) {
 		struct pipe_inode_info *info = inode->i_pipe;
 		inode->i_pipe = NULL;
@@ -379,6 +461,7 @@
 struct file_operations read_fifo_fops = {
 	llseek:		no_llseek,
 	read:		pipe_read,
+	aio_read:	pipe_aio_read,
 	write:		bad_pipe_w,
 	poll:		fifo_poll,
 	ioctl:		pipe_ioctl,
@@ -454,6 +537,9 @@
 	PIPE_READERS(*inode) = PIPE_WRITERS(*inode) = 0;
 	PIPE_WAITING_READERS(*inode) = PIPE_WAITING_WRITERS(*inode) = 0;
 	PIPE_RCOUNTER(*inode) = PIPE_WCOUNTER(*inode) = 1;
+	PIPE_READFILE(*inode) = PIPE_WRITEFILE(*inode) = NULL;
+	INIT_LIST_HEAD(&inode->i_pipe->read_iocb_list);
+	INIT_LIST_HEAD(&inode->i_pipe->write_iocb_list);
 
 	return inode;
 fail_page:
@@ -561,6 +647,9 @@
 	f2->f_mode = 2;
 	f2->f_version = 0;
 
+	PIPE_READFILE(*inode) = f1;
+	PIPE_WRITEFILE(*inode) = f2;
+
 	fd_install(i, f1);
 	fd_install(j, f2);
 	fd[0] = i;
diff -urN v2.4.19-pre5/fs/select.c linux.diff/fs/select.c
--- v2.4.19-pre5/fs/select.c	Mon Sep 24 02:16:05 2001
+++ linux.diff/fs/select.c	Mon May 13 18:34:31 2002
@@ -12,6 +12,12 @@
  *  24 January 2000
  *     Changed sys_poll()/do_poll() to use PAGE_SIZE chunk-based allocation 
  *     of fds to overcome nfds < 16390 descriptors limit (Tigran Aivazian).
+ *  June 2001
+ *    Added async_poll implementation. -bcrl
+ *  Nov 2001
+ *    Async poll improvements from Suparna Bhattacharya
+ *  April 2002
+ *    smp safe async poll plus cancellation. -bcrl
  */
 
 #include <linux/slab.h>
@@ -19,6 +25,8 @@
 #include <linux/poll.h>
 #include <linux/personality.h> /* for STICKY_TIMEOUTS */
 #include <linux/file.h>
+#include <linux/aio.h>
+#include <linux/init.h>
 
 #include <asm/uaccess.h>
 
@@ -26,19 +34,36 @@
 #define DEFAULT_POLLMASK (POLLIN | POLLOUT | POLLRDNORM | POLLWRNORM)
 
 struct poll_table_entry {
-	struct file * filp;
 	wait_queue_t wait;
 	wait_queue_head_t * wait_address;
+	struct file * filp;
+	poll_table * p;
 };
 
 struct poll_table_page {
+	unsigned long size;
 	struct poll_table_page * next;
 	struct poll_table_entry * entry;
 	struct poll_table_entry entries[0];
 };
 
 #define POLL_TABLE_FULL(table) \
-	((unsigned long)((table)->entry+1) > PAGE_SIZE + (unsigned long)(table))
+	((unsigned long)((table)->entry+1) > \
+	 (table)->size + (unsigned long)(table))
+
+/* async poll uses only one entry per poll table as it is linked to an iocb */
+typedef struct async_poll_table_struct {
+	poll_table		pt;		
+	struct worktodo		wtd;
+	int			events;		/* event mask for async poll */
+	int			wake;
+	long			sync;
+	struct poll_table_page	pt_page;	/* one poll table page hdr */
+	struct poll_table_entry entries[1];	/* space for a single entry */
+} async_poll_table;
+
+
+static kmem_cache_t *async_poll_table_cache;
 
 /*
  * Ok, Peter made a complicated, but straightforward multiple_wait() function.
@@ -53,7 +78,7 @@
  * poll table.
  */
 
-void poll_freewait(poll_table* pt)
+void __poll_freewait(poll_table* pt, wait_queue_t *wait)
 {
 	struct poll_table_page * p = pt->table;
 	while (p) {
@@ -61,15 +86,154 @@
 		struct poll_table_page *old;
 
 		entry = p->entry;
+		if (entry == p->entries) /* may happen with async poll */
+			break;
 		do {
 			entry--;
-			remove_wait_queue(entry->wait_address,&entry->wait);
+			if (wait != &entry->wait)
+				remove_wait_queue(entry->wait_address,&entry->wait);
+			else
+				__remove_wait_queue(entry->wait_address,&entry->wait);
 			fput(entry->filp);
 		} while (entry > p->entries);
 		old = p;
 		p = p->next;
-		free_page((unsigned long) old);
+		if (old->size == PAGE_SIZE)
+			free_page((unsigned long) old);
 	}
+	if (pt->iocb)
+		kmem_cache_free(async_poll_table_cache, pt);
+}
+
+void poll_freewait(poll_table* pt)
+{
+	__poll_freewait(pt, NULL);
+}
+
+void async_poll_complete(void *data)
+{
+	async_poll_table *pasync = data;
+	poll_table *p = data;
+	struct kiocb	*iocb = p->iocb;
+	unsigned int	mask;
+
+	pasync->wake = 0;
+	wmb();
+	do {
+		mask = iocb->filp->f_op->poll(iocb->filp, p);
+		mask &= pasync->events | POLLERR | POLLHUP;
+		if (mask) {
+			poll_table *p2 = xchg(&iocb->data, NULL);
+			if (p2) {
+				poll_freewait(p2); 
+				aio_complete(iocb, mask, 0);
+			}
+			return;
+		}
+		pasync->sync = 0;
+		wmb();
+	} while (pasync->wake);
+}
+
+static void do_hack(async_poll_table *pasync, wait_queue_t *wait)
+{
+	struct kiocb	*iocb = pasync->pt.iocb;
+	unsigned int	mask;
+
+	mask = iocb->filp->f_op->poll(iocb->filp, NULL);
+	mask &= pasync->events | POLLERR | POLLHUP;
+	if (mask) {
+		poll_table *p2 = xchg(&iocb->data, NULL);
+		if (p2) {
+			__poll_freewait(p2, wait); 
+			aio_complete(iocb, mask, 0);
+		}
+		return;
+	}
+}
+
+static void async_poll_waiter(wait_queue_t *wait)
+{
+	struct poll_table_entry *entry = (struct poll_table_entry *)wait;
+	async_poll_table *pasync = (async_poll_table *)(entry->p);
+
+#if 1 /*OLS HACK*/
+	do_hack(pasync, wait);
+#else
+	/* avoid writes to the cacheline if possible for SMP */
+	if (!pasync->wake) {
+		pasync->wake = 1;
+		/* ensure only one wake up queues the wtd */
+		if (!pasync->sync && !test_and_set_bit(0, &pasync->sync))
+			wtd_queue(&pasync->wtd);
+	}
+#endif
+}
+
+int async_poll_cancel(struct kiocb *iocb)
+{
+	poll_table *p;
+
+	/* FIXME: almost right */
+	p = xchg(&iocb->data, NULL);
+	if (p) {
+		poll_freewait(p); 
+		aio_complete(iocb, 0, 0);
+		aio_put_req(iocb);
+		return 0;
+	}
+	return -EAGAIN;
+}
+
+int async_poll(struct kiocb *iocb, int events)
+{
+	unsigned int mask;
+	async_poll_table *pasync;
+	poll_table *p;
+
+	/* Fast path */
+	if (iocb->filp->f_op && iocb->filp->f_op->poll) {
+		mask = iocb->filp->f_op->poll(iocb->filp, NULL);
+		mask &= events | POLLERR | POLLHUP;
+		if (mask & events)
+			return events;
+	}
+
+	pasync = kmem_cache_alloc(async_poll_table_cache, SLAB_KERNEL);
+	if (!pasync)
+		return -ENOMEM;
+
+	p = (poll_table *)pasync;
+	poll_initwait(p);
+	wtd_set_action(&pasync->wtd, async_poll_complete, pasync);
+	p->iocb = iocb;
+	pasync->wake = 0;
+	pasync->sync = 0;
+	pasync->events = events;
+	pasync->pt_page.entry = pasync->pt_page.entries;
+	pasync->pt_page.size = sizeof(pasync->pt_page);
+	p->table = &pasync->pt_page;
+
+	iocb->data = p;
+	wmb();
+	iocb->cancel = async_poll_cancel;
+
+	mask = DEFAULT_POLLMASK;
+#warning broken
+	iocb->users ++;
+	if (iocb->filp->f_op && iocb->filp->f_op->poll)
+		mask = iocb->filp->f_op->poll(iocb->filp, p);
+	mask &= events | POLLERR | POLLHUP;
+	if (mask && !test_and_set_bit(0, &pasync->sync))
+		aio_complete(iocb, mask, 0);
+
+	if (aio_put_req(iocb))
+		/* Must be freed after aio_complete to synchronise with 
+		 * cancellation of the request.
+		 */
+		poll_freewait(p);
+
+	return 0;
 }
 
 void __pollwait(struct file * filp, wait_queue_head_t * wait_address, poll_table *p)
@@ -85,6 +249,7 @@
 			__set_current_state(TASK_RUNNING);
 			return;
 		}
+		new_table->size = PAGE_SIZE;
 		new_table->entry = new_table->entries;
 		new_table->next = table;
 		p->table = new_table;
@@ -98,7 +263,11 @@
 	 	get_file(filp);
 	 	entry->filp = filp;
 		entry->wait_address = wait_address;
-		init_waitqueue_entry(&entry->wait, current);
+		entry->p = p;
+		if (p->iocb) /* async poll */
+			init_waitqueue_func_entry(&entry->wait, async_poll_waiter);
+		else
+			init_waitqueue_entry(&entry->wait, current);
 		add_wait_queue(wait_address,&entry->wait);
 	}
 }
@@ -494,3 +663,14 @@
 	poll_freewait(&table);
 	return err;
 }
+
+static int __init async_poll_init(void)
+{
+	async_poll_table_cache = kmem_cache_create("async poll table",
+                        sizeof(async_poll_table), 0, 0, NULL, NULL);
+	if (!async_poll_table_cache)
+		panic("unable to alloc poll_table_cache");
+	return 0;
+}
+
+module_init(async_poll_init);
diff -urN v2.4.19-pre5/include/asm-i386/a.out.h linux.diff/include/asm-i386/a.out.h
--- v2.4.19-pre5/include/asm-i386/a.out.h	Fri Jun 16 14:33:06 1995
+++ linux.diff/include/asm-i386/a.out.h	Tue Apr  2 18:56:58 2002
@@ -19,7 +19,9 @@
 
 #ifdef __KERNEL__
 
-#define STACK_TOP	TASK_SIZE
+#define VSYSCALL_SIZE		0x10000		/* 64KB for vsyscalls */
+#define STACK_GUARD_SIZE	0x02000		/* 8KB guard area */
+#define STACK_TOP	(TASK_SIZE - VSYSCALL_SIZE - STACK_GUARD_SIZE)
 
 #endif
 
diff -urN v2.4.19-pre5/include/asm-i386/poll.h linux.diff/include/asm-i386/poll.h
--- v2.4.19-pre5/include/asm-i386/poll.h	Thu Jan 23 14:01:28 1997
+++ linux.diff/include/asm-i386/poll.h	Tue Apr 30 17:29:31 2002
@@ -15,6 +15,7 @@
 #define POLLWRNORM	0x0100
 #define POLLWRBAND	0x0200
 #define POLLMSG		0x0400
+#define POLLREMOVE	0x1000
 
 struct pollfd {
 	int fd;
diff -urN v2.4.19-pre5/include/asm-i386/unistd.h linux.diff/include/asm-i386/unistd.h
--- v2.4.19-pre5/include/asm-i386/unistd.h	Wed Apr  3 21:04:38 2002
+++ linux.diff/include/asm-i386/unistd.h	Tue Apr  2 18:56:58 2002
@@ -245,6 +245,8 @@
 
 #define __NR_tkill		238
 
+#define __NR_sys_dynamic_syscall	250
+
 /* user-visible error numbers are in the range -1 - -124: see <asm-i386/errno.h> */
 
 #define __syscall_return(type, res) \
diff -urN v2.4.19-pre5/include/asm-i386/vsyscall.h linux.diff/include/asm-i386/vsyscall.h
--- v2.4.19-pre5/include/asm-i386/vsyscall.h	Wed Dec 31 19:00:00 1969
+++ linux.diff/include/asm-i386/vsyscall.h	Mon Apr 22 11:49:21 2002
@@ -0,0 +1,42 @@
+#ifndef __ASM__VSYSCALL_H
+#define __ASM__VSYSCALL_H
+/* include/asm-i386/vsyscall.h
+ *	Copyright 2002 Red Hat, Inc.
+ */
+#include <linux/linkage.h>
+#include <asm/unistd.h>
+
+/* We call sys_dynamic_syscall(long nr, void *args) using regparm(2)
+ * convention.  The .text.vsyscall section is mapped into userspace, 
+ * whereas .data.vsyscall_list is a kernel-only array of the vsyscalls 
+ * and the valid userspace address to call them from.  All vsyscalls 
+ * are called with C calling convention (ie args on the stack for x86).
+ *
+ * Note: the layout of .data.vsyscall_list must match the entries in
+ * dynamic_syscall.c.
+ */
+#define STRINGIFYa(x)	#x
+#define STRINGIFY(x)	STRINGIFYa(x)
+#define NR_dyn_sys	STRINGIFY(__NR_sys_dynamic_syscall)
+#define add_dynamic_syscall(name)				\
+	__asm__("						\n\
+	.section .vsyscall_text, \"xa\"				\n\
+	.globl v" #name "					\n\
+	v" #name ":						\n\
+		push %ecx					\n\
+		push %edx					\n\
+		movl $" NR_dyn_sys ",%eax			\n\
+		movl $2f,%edx					\n\
+		leal 12(%esp),%ecx				\n\
+		int $0x80					\n\
+	1:							\n\
+		popl %edx					\n\
+		popl %ecx					\n\
+		ret						\n\
+	.size v" #name ",.-v" #name "				\n\
+	.previous						\n\
+	.section .data.vsyscall_list,\"a\"			\n\
+	2:	.long	1b," #name "				\n\
+	.previous")
+
+#endif
diff -urN v2.4.19-pre5/include/linux/aio.h linux.diff/include/linux/aio.h
--- v2.4.19-pre5/include/linux/aio.h	Wed Dec 31 19:00:00 1969
+++ linux.diff/include/linux/aio.h	Tue May 14 01:15:47 2002
@@ -0,0 +1,129 @@
+#ifndef __LINUX__AIO_H
+#define __LINUX__AIO_H
+
+#include <linux/tqueue.h>
+#include <linux/kiovec.h>
+#include <linux/list.h>
+#include <asm/atomic.h>
+
+#include <linux/aio_abi.h>
+
+#define AIO_MAXSEGS		4
+#define AIO_KIOGRP_NR_ATOMIC	8
+
+struct kioctx;
+
+/* Notes on cancelling a kiocb:
+ *	If a kiocb is cancelled, aio_complete may return 0 to indicate 
+ *	that cancel has not yet disposed of the kiocb.  All cancel 
+ *	operations *must* call aio_put_req to dispose of the kiocb 
+ *	to guard against races with the completion code.
+ */
+#define KIOCB_C_CANCELLED	0x01
+#define KIOCB_C_COMPLETE	0x02
+
+struct kiocb {
+	struct list_head	list;
+	struct file	*filp;
+	struct kioctx	*ctx;
+	void		*user_obj;
+	__u64		user_data;
+	loff_t		pos;
+	unsigned long	buf;
+	size_t		nr_transferred;	/* used for chunking */
+	size_t		size;
+	size_t		this_size;
+	unsigned	key;		/* id of this request */
+	int		(*cancel)(struct kiocb *kiocb);
+	void		*data;		/* for use by the async op */
+	int		users;
+	union {
+		struct tq_struct	tq;	/* argh. */
+		struct list_head	list;
+	} u;
+};
+
+struct aio_ring {
+	unsigned	id;	/* kernel internal index number */
+	unsigned	nr;	/* number of io_events */
+	unsigned	head;
+	unsigned	tail;
+
+	unsigned	woke;	/* set when a wakeup was sent */
+	unsigned	pad[3];
+
+
+	struct io_event		io_events[0];
+}; /* 128 bytes + ring size */
+
+#define aio_ring_avail(info)	(((info)->ring->head + (info)->nr - 1 - (info)->ring->tail) % (info)->nr)
+
+#define AIO_RING_PAGES	8
+struct aio_ring_info {
+	//struct file		*mmap_file;
+	struct kvec		*kvec;
+	unsigned long		mmap_base;
+	unsigned long		mmap_size;
+
+	struct aio_ring		*ring;	/* == pages[0] */
+	char			**pages;
+	spinlock_t		ring_lock;
+	unsigned		nr_pages;
+
+	unsigned		nr, tail;
+
+	char			*internal_pages[AIO_RING_PAGES];
+};
+
+struct kioctx {
+	atomic_t		users;
+	int			dead;
+	struct mm_struct	*mm;
+
+	/* This needs improving */
+	unsigned long		user_id;
+	struct kioctx		*next;
+
+	wait_queue_head_t	wait;
+
+	spinlock_t		lock;
+
+	int			reqs_active;
+	struct list_head	free_reqs;
+	struct list_head	active_reqs;	/* used for cancellation */
+
+	unsigned		max_reqs;
+
+	struct aio_ring_info	ring_info;
+};
+
+extern struct file_operations aio_fops;
+
+extern int FASTCALL(aio_put_req(struct kiocb *iocb));
+extern int FASTCALL(aio_complete(struct kiocb *iocb, long res, long res2));
+extern void __put_ioctx(struct kioctx *ctx);
+struct mm_struct;
+extern void exit_aio(struct mm_struct *mm);
+
+#define get_ioctx(kioctx)	do { if (unlikely(atomic_read(&(kioctx)->users) <= 0)) BUG(); atomic_inc(&(kioctx)->users); } while (0)
+#define put_ioctx(kioctx)	do { if (unlikely(atomic_dec_and_test(&(kioctx)->users))) __put_ioctx(kioctx); else if (unlikely(atomic_read(&(kioctx)->users) < 0)) BUG(); } while (0)
+
+#include <linux/aio_abi.h>
+
+static inline struct kiocb *list_kiocb(struct list_head *h)
+{
+	return list_entry(h, struct kiocb, list);
+}
+
+struct file;
+extern int generic_aio_poll(struct file *file, struct kiocb *req, struct iocb iocb);
+extern ssize_t generic_aio_read(struct file *file, struct kiocb *req, struct iocb iocb, size_t min_size);
+extern ssize_t generic_aio_write(struct file *file, struct kiocb *req, struct iocb iocb, size_t min_size);
+extern ssize_t generic_file_aio_read(struct file *file, struct kiocb *req, struct iocb iocb);
+extern ssize_t generic_file_aio_write(struct file *file, struct kiocb *req, struct iocb iocb);
+extern ssize_t generic_sock_aio_read(struct file *file, struct kiocb *req, struct iocb iocb);
+
+/* for sysctl: */
+extern unsigned aio_nr, aio_max_nr, aio_max_size, aio_max_pinned;
+
+#endif /* __LINUX__AIO_H */
diff -urN v2.4.19-pre5/include/linux/aio_abi.h linux.diff/include/linux/aio_abi.h
--- v2.4.19-pre5/include/linux/aio_abi.h	Wed Dec 31 19:00:00 1969
+++ linux.diff/include/linux/aio_abi.h	Sun May 12 20:38:23 2002
@@ -0,0 +1,87 @@
+/* linux/aio_abi.h
+ *
+ * Copyright 2000,2001,2002 Red Hat.
+ *
+ * Written by Benjamin LaHaise <bcrl@redhat.com>
+ *
+ * Permission to use, copy, modify, and distribute this software and its
+ * documentation is hereby granted, provided that the above copyright
+ * notice appears in all copies.  This software is provided without any
+ * warranty, express or implied.  Red Hat makes no representations about
+ * the suitability of this software for any purpose.
+ *
+ * IN NO EVENT SHALL RED HAT BE LIABLE TO ANY PARTY FOR DIRECT, INDIRECT,
+ * SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE USE OF
+ * THIS SOFTWARE AND ITS DOCUMENTATION, EVEN IF RED HAT HAS BEEN ADVISED
+ * OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * RED HAT DISCLAIMS ANY WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE.  THE SOFTWARE PROVIDED HEREUNDER IS ON AN "AS IS" BASIS, AND
+ * RED HAT HAS NO OBLIGATION TO PROVIDE MAINTENANCE, SUPPORT, UPDATES,
+ * ENHANCEMENTS, OR MODIFICATIONS.
+ */
+#ifndef __LINUX__AIO_ABI_H
+#define __LINUX__AIO_ABI_H
+
+#include <asm/byteorder.h>
+
+typedef unsigned long	aio_context_t;
+
+enum {
+	IOCB_CMD_PREAD = 0,
+	IOCB_CMD_PWRITE = 1,
+	IOCB_CMD_FSYNC = 2,
+	IOCB_CMD_FDSYNC = 3,
+	IOCB_CMD_PREADX = 4,
+	IOCB_CMD_POLL = 5,
+	IOCB_CMD_NOOP = 6,
+};
+
+/* read() from /dev/aio returns these structures. */
+struct io_event {
+	__u64		data;		/* the data field from the iocb */
+	__u64		obj;		/* what iocb this event came from */
+	__s64		res;		/* result code for this event */
+	__s64		res2;		/* secondary result */
+};
+
+#if defined(__LITTLE_ENDIAN)
+#define PADDED(x,y)	x, y
+#elif defined(__BIG_ENDIAN)
+#define PADDED(x,y)	y, x
+#else
+#error edit for your odd byteorder.
+#endif
+
+/*
+ * we always use a 64bit off_t when communicating
+ * with userland.  its up to libraries to do the
+ * proper padding and aio_error abstraction
+ */
+
+struct iocb {
+	/* these are internal to the kernel/libc. */
+	__u64	aio_data;	/* data to be returned in event's data */
+	__u32	PADDED(aio_key, aio_reserved1);
+				/* the kernel sets aio_key to the req # */
+
+	/* common fields */
+	__u16	aio_lio_opcode;	/* see IOCB_CMD_ above */
+	__s16	aio_reqprio;
+	__u32	aio_fildes;
+
+	__u64	aio_buf;
+	__u64	aio_nbytes;
+	__s64	aio_offset;
+
+	/* extra parameters */
+	__u64	aio_reserved2;
+	__u64	aio_reserved3;
+}; /* 64 bytes */
+
+#undef IFBIG
+#undef IFLITTLE
+
+#endif /* __LINUX__AIO_ABI_H */
+
diff -urN v2.4.19-pre5/include/linux/brlock.h linux.diff/include/linux/brlock.h
--- v2.4.19-pre5/include/linux/brlock.h	Wed Apr  3 21:10:30 2002
+++ linux.diff/include/linux/brlock.h	Tue May  7 17:15:39 2002
@@ -34,6 +34,7 @@
 enum brlock_indices {
 	BR_GLOBALIRQ_LOCK,
 	BR_NETPROTO_LOCK,
+	BR_AIO_REQ_LOCK,
 
 	__BR_END
 };
diff -urN v2.4.19-pre5/include/linux/errno.h linux.diff/include/linux/errno.h
--- v2.4.19-pre5/include/linux/errno.h	Tue Nov  6 20:40:27 2001
+++ linux.diff/include/linux/errno.h	Tue Apr  2 18:56:57 2002
@@ -21,6 +21,9 @@
 #define EBADTYPE	527	/* Type not supported by server */
 #define EJUKEBOX	528	/* Request initiated, but will not complete before timeout */
 
+/* Defined for TUX async IO */
+#define EWOULDBLOCKIO	530	/* Would block due to block-IO */
+
 #endif
 
 #endif
diff -urN v2.4.19-pre5/include/linux/eventpoll.h linux.diff/include/linux/eventpoll.h
--- v2.4.19-pre5/include/linux/eventpoll.h	Wed Dec 31 19:00:00 1969
+++ linux.diff/include/linux/eventpoll.h	Tue Apr 30 17:29:31 2002
@@ -0,0 +1,43 @@
+/*
+ *  include/linux/eventpoll.h
+ *
+ *  Copyright (C) 2001, Davide Libenzi <davidel@xmailserver.org>
+ *
+ *  Efficient event polling implementation
+ */
+
+
+#ifndef _LINUX_EVENTPOLL_H
+#define _LINUX_EVENTPOLL_H
+
+
+
+
+#define EVENTPOLL_MINOR	124
+#define POLLFD_X_PAGE	(PAGE_SIZE / sizeof(struct pollfd))
+#define MAX_FDS_IN_EVENTPOLL	(1024 * 128)
+#define MAX_EVENTPOLL_PAGES	(MAX_FDS_IN_EVENTPOLL / POLLFD_X_PAGE)
+#define EVENT_PAGE_INDEX(n)	((n) / POLLFD_X_PAGE)
+#define EVENT_PAGE_REM(n)	((n) % POLLFD_X_PAGE)
+#define EVENT_PAGE_OFFSET(n)	(((n) % POLLFD_X_PAGE) * sizeof(struct pollfd))
+#define EP_FDS_PAGES(n)	(((n) + POLLFD_X_PAGE - 1) / POLLFD_X_PAGE)
+#define EP_MAP_SIZE(n)	(EP_FDS_PAGES(n) * PAGE_SIZE * 2)
+
+
+
+
+
+struct evpoll {
+	int ep_timeout;
+	unsigned long ep_resoff;
+};
+
+#define EP_ALLOC	_IOR('P', 1, int)
+#define EP_POLL		_IOWR('P', 2, struct evpoll)
+#define EP_FREE		_IO('P', 3)
+#define EP_ISPOLLED	_IOWR('P', 4, struct pollfd)
+
+
+
+#endif
+
diff -urN v2.4.19-pre5/include/linux/fcblist.h linux.diff/include/linux/fcblist.h
--- v2.4.19-pre5/include/linux/fcblist.h	Wed Dec 31 19:00:00 1969
+++ linux.diff/include/linux/fcblist.h	Tue May  7 17:15:40 2002
@@ -0,0 +1,67 @@
+/*
+ *  include/linux/fcblist.h
+ *
+ *  Copyright (C) 2001, Davide Libenzi <davidel@xmailserver.org>
+ *
+ *  Handle file callbacks
+ */
+
+#ifndef __LINUX_FCBLIST_H
+#define __LINUX_FCBLIST_H
+
+#include <linux/config.h>
+#include <linux/list.h>
+#include <linux/spinlock.h>
+#include <linux/file.h>
+
+
+
+/* file callback notification events */
+#define ION_IN		1
+#define ION_OUT		2
+#define ION_HUP		3
+#define ION_ERR		4
+
+#define FCB_LOCAL_SIZE	4
+
+#define fcblist_read_lock(fp, fl)		read_lock_irqsave(&(fp)->f_cblock, fl)
+#define fcblist_read_unlock(fp, fl)		read_unlock_irqrestore(&(fp)->f_cblock, fl)
+#define fcblist_write_lock(fp, fl)		write_lock_irqsave(&(fp)->f_cblock, fl)
+#define fcblist_write_unlock(fp, fl)	write_unlock_irqrestore(&(fp)->f_cblock, fl)
+
+struct fcb_struct {
+	struct list_head llink;
+	void (*cbproc)(struct file *, void *, unsigned long *, long *);
+	void *data;
+	unsigned long local[FCB_LOCAL_SIZE];
+};
+
+
+extern long ion_band_table[];
+extern long poll_band_table[];
+
+
+void file_notify_event(struct file *filep, long *event);
+
+int file_notify_addcb(struct file *filep,
+		void (*cbproc)(struct file *, void *, unsigned long *, long *), void *data);
+
+int file_notify_delcb(struct file *filep,
+		void (*cbproc)(struct file *, void *, unsigned long *, long *));
+
+void file_notify_cleanup(struct file *filep);
+
+
+static inline void file_notify_init(struct file *filep)
+{
+	rwlock_init(&filep->f_cblock);
+	INIT_LIST_HEAD(&filep->f_cblist);
+}
+
+static inline void file_send_notify(struct file *filep, long ioevt, long plevt) {
+	long event[] = { ioevt, plevt, -1 };
+
+	file_notify_event(filep, event);
+}
+
+#endif
diff -urN v2.4.19-pre5/include/linux/file.h linux.diff/include/linux/file.h
--- v2.4.19-pre5/include/linux/file.h	Wed Apr  3 21:04:40 2002
+++ linux.diff/include/linux/file.h	Tue Apr  2 18:56:57 2002
@@ -5,6 +5,7 @@
 #ifndef __LINUX_FILE_H
 #define __LINUX_FILE_H
 
+extern void FASTCALL(__fput(struct file *));
 extern void FASTCALL(fput(struct file *));
 extern struct file * FASTCALL(fget(unsigned int fd));
  
diff -urN v2.4.19-pre5/include/linux/fs.h linux.diff/include/linux/fs.h
--- v2.4.19-pre5/include/linux/fs.h	Wed Apr  3 21:12:53 2002
+++ linux.diff/include/linux/fs.h	Tue May 14 12:54:53 2002
@@ -196,6 +196,8 @@
 #define FIGETBSZ   _IO(0x00,2)	/* get the block size used for bmap */
 
 #ifdef __KERNEL__
+#include <linux/aio.h>
+#include <linux/aio_abi.h>
 
 #include <asm/semaphore.h>
 #include <asm/byteorder.h>
@@ -536,6 +538,10 @@
 	/* needed for tty driver, and maybe others */
 	void			*private_data;
 
+	/* file callback list */
+	rwlock_t f_cblock;
+	struct list_head f_cblist;
+
 	/* preallocated helper kiobuf to speedup O_DIRECT */
 	struct kiobuf		*f_iobuf;
 	long			f_iobuf_lock;
@@ -823,6 +829,10 @@
  * read, write, poll, fsync, readv, writev can be called
  *   without the big kernel lock held in all filesystems.
  */
+
+#define F_ATOMIC	0x0001
+#define F_OFFSETOK	0x0002
+
 struct file_operations {
 	struct module *owner;
 	loff_t (*llseek) (struct file *, loff_t, int);
@@ -842,6 +852,16 @@
 	ssize_t (*writev) (struct file *, const struct iovec *, unsigned long, loff_t *);
 	ssize_t (*sendpage) (struct file *, struct page *, int, size_t, loff_t *, int);
 	unsigned long (*get_unmapped_area)(struct file *, unsigned long, unsigned long, unsigned long, unsigned long);
+
+	/* in-kernel fully async api */
+	int (*kvec_read)(struct file *, kvec_cb_t, size_t, loff_t);
+	int (*kvec_write)(struct file *, kvec_cb_t, size_t, loff_t);
+
+	/* userland aio ops */
+	ssize_t (*aio_read)(struct file *, struct kiocb *, struct iocb);
+	ssize_t (*aio_readx)(struct file *, struct kiocb *, struct iocb);
+	ssize_t (*aio_write)(struct file *, struct kiocb *, struct iocb);
+	ssize_t (*aio_fsync)(struct file *, struct kiocb *, struct iocb);
 };
 
 struct inode_operations {
@@ -1420,12 +1440,16 @@
 extern int generic_file_mmap(struct file *, struct vm_area_struct *);
 extern int file_read_actor(read_descriptor_t * desc, struct page *page, unsigned long offset, unsigned long size);
 extern ssize_t generic_file_read(struct file *, char *, size_t, loff_t *);
+extern ssize_t generic_file_new_read(struct file *, char *, size_t, loff_t *, int);
 extern ssize_t generic_file_write(struct file *, const char *, size_t, loff_t *);
-extern void do_generic_file_read(struct file *, loff_t *, read_descriptor_t *, read_actor_t);
-extern loff_t no_llseek(struct file *file, loff_t offset, int origin);
-extern loff_t generic_file_llseek(struct file *file, loff_t offset, int origin);
+extern void do_generic_file_read(struct file *, loff_t *, read_descriptor_t *, read_actor_t, int);
+extern int generic_file_kvec_read(struct file *file, kvec_cb_t cb, size_t size, loff_t pos);
+extern int generic_file_kvec_write(struct file *file, kvec_cb_t cb, size_t size, loff_t pos);
+
 extern ssize_t generic_read_dir(struct file *, char *, size_t, loff_t *);
 extern int generic_file_open(struct inode * inode, struct file * filp);
+extern loff_t no_llseek(struct file *file, loff_t offset, int origin);
+extern loff_t generic_file_llseek(struct file *file, loff_t offset, int origin);
 
 extern struct file_operations generic_ro_fops;
 
diff -urN v2.4.19-pre5/include/linux/iobuf.h linux.diff/include/linux/iobuf.h
--- v2.4.19-pre5/include/linux/iobuf.h	Wed Apr  3 21:12:55 2002
+++ linux.diff/include/linux/iobuf.h	Tue May 14 12:54:53 2002
@@ -53,8 +53,10 @@
 
 	/* Dynamic state for IO completion: */
 	atomic_t	io_count;	/* IOs still in progress */
+	int		transferred;	/* Number of bytes of completed IO at the beginning of the buffer */
 	int		errno;		/* Status of completed IO */
 	void		(*end_io) (struct kiobuf *); /* Completion callback */
+	void		*end_io_data;
 	wait_queue_head_t wait_queue;
 };
 
@@ -80,6 +82,8 @@
 
 /* fs/buffer.c */
 
+int	brw_kiovec_async(int rw, int nr, struct kiobuf *iovec[], 
+		   kdev_t dev, int nr_blocks, unsigned long b[], int size);
 int	brw_kiovec(int rw, int nr, struct kiobuf *iovec[], 
 		   kdev_t dev, unsigned long b[], int size);
 
diff -urN v2.4.19-pre5/include/linux/kiovec.h linux.diff/include/linux/kiovec.h
--- v2.4.19-pre5/include/linux/kiovec.h	Wed Dec 31 19:00:00 1969
+++ linux.diff/include/linux/kiovec.h	Tue May  7 17:15:39 2002
@@ -0,0 +1,123 @@
+#ifndef __LINUX__KIOVEC_H
+#define __LINUX__KIOVEC_H
+
+struct page;
+#include <linux/list.h>
+
+struct kveclet {
+	struct page	*page;
+	unsigned	offset;
+	unsigned	length;
+};
+
+struct kvec {
+	unsigned	max_nr;
+	unsigned	nr;
+	struct kveclet	veclet[0];
+};
+
+struct kvec_cb {
+	struct kvec	*vec;
+	void		(*fn)(void *data, struct kvec *vec, ssize_t res);
+	void		*data;
+};
+
+struct kvec_cb_list {
+	struct list_head	list;
+	struct kvec_cb		cb;
+};
+
+#ifndef _LINUX_TYPES_H
+#include <linux/types.h>
+#endif
+#ifndef _LINUX_KDEV_T_H
+#include <linux/kdev_t.h>
+#endif
+#ifndef _ASM_KMAP_TYPES_H
+#include <asm/kmap_types.h>
+#endif
+
+extern struct kvec *map_user_kvec(int rw, unsigned long va, size_t len);
+extern struct kvec *mm_map_user_kvec(struct mm_struct *, int rw,
+				     unsigned long va, size_t len);
+extern void unmap_kvec(struct kvec *, int dirtied);
+extern void free_kvec(struct kvec *);
+
+/* brw_kvec_async:
+ *	Performs direct io to/from disk into cb.vec.  Count is the number
+ *	of sectors to read, sector_shift is the blocksize (which must be
+ *	compatible with the kernel's current idea of the device's sector
+ *	size) in log2.  blknr is the starting sector offset on dev.
+ *
+ */
+extern int brw_kvec_async(int rw, kvec_cb_t cb, kdev_t dev, unsigned count,
+			  unsigned long blknr, int sector_shift);
+
+/* Memory copy helpers usage:
+ * void foo(... struct kveclet *veclet...)
+ *
+ *	struct kvec_dst	dst;
+ *
+ *	kvec_dst_init(&dst, KM_USER0);			-- resets type
+ *	kvec_dst_set(&dst, veclet);			-- set target & clear offset
+ *	kvec_dst_map(&dst);				-- activates kmap
+ *	for (...)
+ *		memcpy_to_kvec_dst(&dst, data, size);	-- each copy appends
+ *	kvec_dst_unmap(&dst);				-- releases kmap
+ *
+ * Note that scheduling is not permitted between kvec_dst_map() and
+ * kvec_dst_unmap().  This is because internally the routines make use
+ * of an atomic kmap.
+ */
+struct kvec_dst {
+	char		*addr;
+	char		*dst;
+	struct kveclet	*let;
+	int		space;
+	int		offset;
+	enum km_type	type;
+};
+
+
+#define kvec_dst_set(Xdst, Xlet)					\
+	do {								\
+		struct kvec_dst *_dst = (Xdst);				\
+		struct kveclet *_let = (Xlet);				\
+		_dst->let = _let;					\
+		_dst->space = _let->length;				\
+		_dst->offset = 0;					\
+	} while(0)
+
+#define kvec_dst_map(Xdst)						\
+	do {								\
+		struct kvec_dst *_dst = (Xdst);				\
+		struct kveclet *_let = _dst->let;			\
+		_dst->dst = _dst->addr = kmap_atomic(_let->page, _dst->type);\
+		_dst->dst += _let->offset + _dst->offset;		\
+		_dst->space = _let->length - _dst->offset;		\
+		_dst->offset = 0;					\
+	} while(0)
+
+#define kvec_dst_init(Xdst, Xtype)					\
+	do {								\
+		(Xdst)->space = 0;					\
+		(Xdst)->addr = 0;					\
+		(Xdst)->offset = 0;					\
+		(Xdst)->type = Xtype;					\
+	} while(0)
+
+#define	kvec_dst_unmap(Xdst)						\
+	do {								\
+		struct kvec_dst *_dst = (Xdst);				\
+		kunmap_atomic(_dst->addr, _dst->type);			\
+		_dst->offset = _dst->dst - _dst->addr;			\
+		_dst->offset -= _dst->let->offset;			\
+		_dst->addr = NULL;					\
+	} while(0)
+
+extern void FASTCALL(memcpy_to_kvec_dst(struct kvec_dst *dst,
+					const char *from, long len));
+extern void FASTCALL(memcpy_from_kvec_dst(char *to,
+					  struct kvec_dst *from, long len));
+
+#endif /* __LINUX__KIOVEC_H */
diff -urN v2.4.19-pre5/include/linux/list.h linux.diff/include/linux/list.h
--- v2.4.19-pre5/include/linux/list.h	Wed Apr  3 21:12:49 2002
+++ linux.diff/include/linux/list.h	Tue May  7 17:15:39 2002
@@ -172,6 +172,11 @@
         	pos = pos->prev, prefetch(pos->prev))
         	
 
+#define list_first(head)	(((head)->next != (head)) ? (head)->next: (struct list_head *) 0)
+#define list_last(head)	(((head)->prev != (head)) ? (head)->prev: (struct list_head *) 0)
+#define list_next(pos, head)	(((pos)->next != (head)) ? (pos)->next: (struct list_head *) 0)
+#define list_prev(pos, head)	(((pos)->prev != (head)) ? (pos)->prev: (struct list_head *) 0)
+
 #endif /* __KERNEL__ || _LVM_H_INCLUDE */
 
 #endif
diff -urN v2.4.19-pre5/include/linux/net.h linux.diff/include/linux/net.h
--- v2.4.19-pre5/include/linux/net.h	Wed Apr  3 21:12:49 2002
+++ linux.diff/include/linux/net.h	Tue May 14 01:16:14 2002
@@ -83,6 +83,9 @@
 struct scm_cookie;
 struct vm_area_struct;
 struct page;
+struct iocb;
+struct kioctx;
+#include <linux/aio.h>		/* shut gcc up */
 
 struct proto_ops {
   int	family;
@@ -110,6 +113,8 @@
   int   (*recvmsg)	(struct socket *sock, struct msghdr *m, int total_len, int flags, struct scm_cookie *scm);
   int	(*mmap)		(struct file *file, struct socket *sock, struct vm_area_struct * vma);
   ssize_t (*sendpage)	(struct socket *sock, struct page *page, int offset, size_t size, int flags);
+  int   (*kvec_read)	(struct socket *sock, kvec_cb_t cb, size_t size);
+  int   (*kvec_write)	(struct socket *sock, kvec_cb_t cb, size_t size);
 };
 
 struct net_proto_family 
diff -urN v2.4.19-pre5/include/linux/pagemap.h linux.diff/include/linux/pagemap.h
--- v2.4.19-pre5/include/linux/pagemap.h	Wed Apr  3 21:12:55 2002
+++ linux.diff/include/linux/pagemap.h	Tue May 14 12:54:53 2002
@@ -88,6 +88,7 @@
 extern void add_to_page_cache(struct page * page, struct address_space *mapping, unsigned long index);
 extern void add_to_page_cache_locked(struct page * page, struct address_space *mapping, unsigned long index);
 extern int add_to_page_cache_unique(struct page * page, struct address_space *mapping, unsigned long index, struct page **hash);
+extern wait_queue_head_t *FASTCALL(page_waitqueue(struct page *page));
 
 extern void ___wait_on_page(struct page *);
 
diff -urN v2.4.19-pre5/include/linux/pipe_fs_i.h linux.diff/include/linux/pipe_fs_i.h
--- v2.4.19-pre5/include/linux/pipe_fs_i.h	Thu May  3 11:22:20 2001
+++ linux.diff/include/linux/pipe_fs_i.h	Tue May 14 01:37:23 2002
@@ -1,6 +1,8 @@
 #ifndef _LINUX_PIPE_FS_I_H
 #define _LINUX_PIPE_FS_I_H
 
+#include <linux/list.h>
+
 #define PIPEFS_MAGIC 0x50495045
 struct pipe_inode_info {
 	wait_queue_head_t wait;
@@ -13,6 +15,11 @@
 	unsigned int waiting_writers;
 	unsigned int r_counter;
 	unsigned int w_counter;
+	struct file *rdfile;
+	struct file *wrfile;
+
+	struct list_head	read_iocb_list;
+	struct list_head	write_iocb_list;
 };
 
 /* Differs from PIPE_BUF in that PIPE_SIZE is the length of the actual
@@ -30,6 +37,8 @@
 #define PIPE_WAITING_WRITERS(inode)	((inode).i_pipe->waiting_writers)
 #define PIPE_RCOUNTER(inode)	((inode).i_pipe->r_counter)
 #define PIPE_WCOUNTER(inode)	((inode).i_pipe->w_counter)
+#define PIPE_READFILE(inode)	((inode).i_pipe->rdfile)
+#define PIPE_WRITEFILE(inode)	((inode).i_pipe->wrfile)
 
 #define PIPE_EMPTY(inode)	(PIPE_LEN(inode) == 0)
 #define PIPE_FULL(inode)	(PIPE_LEN(inode) == PIPE_SIZE)
diff -urN v2.4.19-pre5/include/linux/poll.h linux.diff/include/linux/poll.h
--- v2.4.19-pre5/include/linux/poll.h	Wed Apr  3 21:12:55 2002
+++ linux.diff/include/linux/poll.h	Tue May 14 12:54:58 2002
@@ -9,12 +9,15 @@
 #include <linux/string.h>
 #include <linux/mm.h>
 #include <asm/uaccess.h>
+#include <linux/worktodo.h>
 
 struct poll_table_page;
+struct kiocb;
 
 typedef struct poll_table_struct {
-	int error;
-	struct poll_table_page * table;
+	int			error;
+	struct poll_table_page	*table;
+	struct kiocb		*iocb;		/* iocb for async poll */
 } poll_table;
 
 extern void __pollwait(struct file * filp, wait_queue_head_t * wait_address, poll_table *p);
@@ -29,8 +32,11 @@
 {
 	pt->error = 0;
 	pt->table = NULL;
+	pt->iocb = NULL;
 }
+
 extern void poll_freewait(poll_table* pt);
+extern int async_poll(struct kiocb *iocb, int events);
 
 
 /*
diff -urN v2.4.19-pre5/include/linux/sched.h linux.diff/include/linux/sched.h
--- v2.4.19-pre5/include/linux/sched.h	Wed Apr  3 21:12:55 2002
+++ linux.diff/include/linux/sched.h	Tue May 14 12:54:53 2002
@@ -203,6 +203,7 @@
 
 extern int max_map_count;
 
+struct kioctx;
 struct mm_struct {
 	struct vm_area_struct * mmap;		/* list of VMAs */
 	rb_root_t mm_rb;
@@ -231,6 +232,10 @@
 
 	/* Architecture-specific MM context */
 	mm_context_t context;
+
+	struct kioctx	*ioctx_list;
+	unsigned long	new_ioctx_id;
+	int		vsys_mapped;
 };
 
 extern int mmlist_nr;
@@ -243,6 +248,7 @@
 	mm_count:	ATOMIC_INIT(1), 		\
 	mmap_sem:	__RWSEM_INITIALIZER(name.mmap_sem), \
 	page_table_lock: SPIN_LOCK_UNLOCKED, 		\
+	vsys_mapped:	0,				\
 	mmlist:		LIST_HEAD_INIT(name.mmlist),	\
 }
 
@@ -794,6 +800,7 @@
 
 extern void FASTCALL(add_wait_queue(wait_queue_head_t *q, wait_queue_t * wait));
 extern void FASTCALL(add_wait_queue_exclusive(wait_queue_head_t *q, wait_queue_t * wait));
+extern void FASTCALL(add_wait_queue_exclusive_lifo(wait_queue_head_t *q, wait_queue_t * wait));
 extern void FASTCALL(remove_wait_queue(wait_queue_head_t *q, wait_queue_t * wait));
 
 #define __wait_event(wq, condition) 					\
diff -urN v2.4.19-pre5/include/linux/skbuff.h linux.diff/include/linux/skbuff.h
--- v2.4.19-pre5/include/linux/skbuff.h	Wed Apr  3 21:12:55 2002
+++ linux.diff/include/linux/skbuff.h	Tue May 14 12:54:58 2002
@@ -1128,6 +1128,15 @@
 extern unsigned int		skb_copy_and_csum_bits(const struct sk_buff *skb, int offset, u8 *to, int len, unsigned int csum);
 extern void			skb_copy_and_csum_dev(const struct sk_buff *skb, u8 *to);
 
+/* skb <-> kvec helpers */
+extern void skb_copy_datagram_kvec(const struct sk_buff *skb, int offset,
+			struct kvec *vec, int len);
+extern int skb_copy_and_csum_datagram_kvec(const struct sk_buff *skb,
+		int offset, struct kvec *vec, int len);
+extern int skb_kvec_recv_datagram(struct sock * sk, kvec_cb_t cb, int len,
+        void (*finish)(struct sock *sk, kvec_cb_t cb, int len, struct sk_buff *skb));
+
+
 extern void skb_init(void);
 extern void skb_add_mtu(int mtu);
 
diff -urN v2.4.19-pre5/include/linux/sysctl.h linux.diff/include/linux/sysctl.h
--- v2.4.19-pre5/include/linux/sysctl.h	Wed Apr  3 21:12:49 2002
+++ linux.diff/include/linux/sysctl.h	Tue May  7 17:15:39 2002
@@ -546,6 +546,13 @@
 	FS_LEASES=13,	/* int: leases enabled */
 	FS_DIR_NOTIFY=14,	/* int: directory notification enabled */
 	FS_LEASE_TIME=15,	/* int: maximum time to wait for a lease break */
+	/* 16 == jbd-debug */
+	/* 17 == jbd-oom-retry */
+
+	FS_AIO_NR=18,		/* int: current number of aio requests */
+	FS_AIO_MAX_NR=19,	/* int: max system wide aio requests */
+	FS_AIO_MAX_SIZE=20,	/* int: max size of read/write chunks */
+	FS_AIO_MAX_PINNED=21,	/* long: max memory pinned (in pages) */
 };
 
 /* CTL_DEBUG names: */
diff -urN v2.4.19-pre5/include/linux/tasklet.h linux.diff/include/linux/tasklet.h
--- v2.4.19-pre5/include/linux/tasklet.h	Wed Dec 31 19:00:00 1969
+++ linux.diff/include/linux/tasklet.h	Wed Apr 10 17:06:48 2002
@@ -0,0 +1,154 @@
+#ifndef __LINUX__TASKLET_H
+#define __LINUX__TASKLET_H
+
+#include <linux/config.h>
+#include <asm/atomic.h>
+#include <asm/bitops.h>
+#include <asm/system.h>		/* for smp_mb */
+
+/* Tasklets --- multithreaded analogue of BHs.
+
+   Main feature differing them from generic softirqs: tasklet
+   is running only on one CPU simultaneously.
+
+   Main feature differing them from BHs: different tasklets
+   may be run simultaneously on different CPUs.
+
+   Properties:
+   * If tasklet_schedule() is called, then tasklet is guaranteed
+     to be executed on some cpu at least once after this.
+   * If the tasklet is already scheduled, but its execution is still not
+     started, it will be executed only once.
+   * If this tasklet is already running on another CPU (or schedule is called
+     from tasklet itself), it is rescheduled for later.
+   * Tasklet is strictly serialized wrt itself, but not
+     wrt another tasklets. If client needs some intertask synchronization,
+     he makes it with spinlocks.
+ */
+
+struct tasklet_struct
+{
+	struct tasklet_struct *next;
+	unsigned long state;
+	atomic_t count;
+	void (*func)(unsigned long);
+	unsigned long data;
+	int	*unlocked;
+};
+
+#define DECLARE_TASKLET(name, func, data) \
+struct tasklet_struct name = { NULL, 0, ATOMIC_INIT(0), func, data, NULL }
+
+#define DECLARE_TASKLET_DISABLED(name, func, data) \
+struct tasklet_struct name = { NULL, 0, ATOMIC_INIT(1), func, data, NULL }
+
+
+enum
+{
+	TASKLET_STATE_SCHED,	/* Tasklet is scheduled for execution */
+	TASKLET_STATE_RUN	/* Tasklet is running (SMP only) */
+};
+
+struct tasklet_head
+{
+	struct tasklet_struct *list;
+} __attribute__ ((__aligned__(SMP_CACHE_BYTES)));
+
+extern struct tasklet_head tasklet_vec[NR_CPUS];
+extern struct tasklet_head tasklet_hi_vec[NR_CPUS];
+
+#ifdef CONFIG_SMP
+static inline int tasklet_trylock(struct tasklet_struct *t)
+{
+	return !test_and_set_bit(TASKLET_STATE_RUN, &(t)->state);
+}
+
+static inline void tasklet_unlock(struct tasklet_struct *t)
+{
+	smp_mb__before_clear_bit(); 
+	clear_bit(TASKLET_STATE_RUN, &(t)->state);
+}
+
+static inline void tasklet_unlock_self(struct tasklet_struct *t)
+{
+	*t->unlocked = 1;
+	t->unlocked = NULL;
+	tasklet_unlock(t);
+}
+
+static inline void tasklet_unlock_wait(struct tasklet_struct *t)
+{
+	while (test_bit(TASKLET_STATE_RUN, &(t)->state)) { barrier(); }
+}
+#else
+#define tasklet_trylock(t) 1
+#define tasklet_unlock_wait(t) do { } while (0)
+#define tasklet_unlock(t) do { } while (0)
+#endif
+
+extern void FASTCALL(__tasklet_schedule(struct tasklet_struct *t));
+
+static inline void tasklet_schedule(struct tasklet_struct *t)
+{
+	if (!test_and_set_bit(TASKLET_STATE_SCHED, &t->state))
+		__tasklet_schedule(t);
+}
+
+extern void FASTCALL(__tasklet_hi_schedule(struct tasklet_struct *t));
+
+static inline void tasklet_hi_schedule(struct tasklet_struct *t)
+{
+	if (!test_and_set_bit(TASKLET_STATE_SCHED, &t->state))
+		__tasklet_hi_schedule(t);
+}
+
+
+static inline void tasklet_disable_nosync(struct tasklet_struct *t)
+{
+	atomic_inc(&t->count);
+	smp_mb__after_atomic_inc();
+}
+
+static inline void tasklet_disable(struct tasklet_struct *t)
+{
+	tasklet_disable_nosync(t);
+	tasklet_unlock_wait(t);
+	smp_mb();
+}
+
+static inline void tasklet_enable(struct tasklet_struct *t)
+{
+	smp_mb__before_atomic_dec();
+	atomic_dec(&t->count);
+}
+
+static inline void tasklet_hi_enable(struct tasklet_struct *t)
+{
+	smp_mb__before_atomic_dec();
+	atomic_dec(&t->count);
+}
+
+extern void tasklet_kill(struct tasklet_struct *t);
+extern void tasklet_init(struct tasklet_struct *t,
+			 void (*func)(unsigned long), unsigned long data);
+
+#ifdef CONFIG_SMP
+
+#define SMP_TIMER_NAME(name) name##__thr
+
+#define SMP_TIMER_DEFINE(name, task) \
+DECLARE_TASKLET(task, name##__thr, 0); \
+static void name (unsigned long dummy) \
+{ \
+	tasklet_schedule(&(task)); \
+}
+
+#else /* CONFIG_SMP */
+
+#define SMP_TIMER_NAME(name) name
+#define SMP_TIMER_DEFINE(name, task)
+
+#endif /* CONFIG_SMP */
+
+
+#endif /* __LINUX__TASKLET_H */
diff -urN v2.4.19-pre5/include/linux/tqueue.h linux.diff/include/linux/tqueue.h
--- v2.4.19-pre5/include/linux/tqueue.h	Wed Apr  3 21:12:49 2002
+++ linux.diff/include/linux/tqueue.h	Tue May  7 17:15:39 2002
@@ -67,6 +67,7 @@
 #define TQ_ACTIVE(q)		(!list_empty(&q))
 
 extern task_queue tq_timer, tq_immediate, tq_disk;
+extern struct tq_struct run_disk_tq;
 
 /*
  * To implement your own list of active bottom halfs, use the following
diff -urN v2.4.19-pre5/include/linux/types.h linux.diff/include/linux/types.h
--- v2.4.19-pre5/include/linux/types.h	Wed Apr  3 21:10:29 2002
+++ linux.diff/include/linux/types.h	Tue Apr  2 19:14:27 2002
@@ -127,4 +127,9 @@
 	char			f_fpack[6];
 };
 
+/* kernel typedefs -- they belong here. */
+#ifdef __KERNEL__
+typedef struct kvec_cb kvec_cb_t;
+#endif /* __KERNEL__ */
+
 #endif /* _LINUX_TYPES_H */
diff -urN v2.4.19-pre5/include/linux/vsyscall.h linux.diff/include/linux/vsyscall.h
--- v2.4.19-pre5/include/linux/vsyscall.h	Wed Dec 31 19:00:00 1969
+++ linux.diff/include/linux/vsyscall.h	Tue Apr  2 18:56:58 2002
@@ -0,0 +1,20 @@
+#ifndef _LINUX__VSYSCALL_H
+#define _LINUX__VSYSCALL_H
+
+struct vsys_cpudata {
+	unsigned long	context_switches;
+	unsigned long	tv_sec;
+	unsigned long	tsc_low, tsc_high;
+	unsigned long	cycles_per_sec;
+};
+
+union vsys_union {
+	struct vsys_cpudata	data;
+	char			pad[128];
+};
+
+extern union vsys_union	vsys_cpudata[256] __attribute__((section(".data.vsyscall")));
+
+#define vsys_data(cpu)	(&vsys_cpudata[cpu].data)
+
+#endif /*ndef _LINUX__VSYSCALL_H*/
diff -urN v2.4.19-pre5/include/linux/wait.h linux.diff/include/linux/wait.h
--- v2.4.19-pre5/include/linux/wait.h	Wed Apr  3 21:12:49 2002
+++ linux.diff/include/linux/wait.h	Tue May  7 17:15:39 2002
@@ -28,17 +28,20 @@
 #define WAITQUEUE_DEBUG 0
 #endif
 
+typedef struct __wait_queue wait_queue_t;
+typedef void (*wait_queue_func_t)(wait_queue_t *wait);
+
 struct __wait_queue {
 	unsigned int flags;
 #define WQ_FLAG_EXCLUSIVE	0x01
 	struct task_struct * task;
 	struct list_head task_list;
+	wait_queue_func_t func;
 #if WAITQUEUE_DEBUG
 	long __magic;
 	long __waker;
 #endif
 };
-typedef struct __wait_queue wait_queue_t;
 
 /*
  * 'dual' spinlock architecture. Can be switched between spinlock_t and
@@ -137,6 +140,7 @@
 #endif
 
 #define __WAITQUEUE_INITIALIZER(name, tsk) {				\
+	func:		NULL,						\
 	task:		tsk,						\
 	task_list:	{ NULL, NULL },					\
 			 __WAITQUEUE_DEBUG_INIT(name)}
@@ -174,6 +178,22 @@
 #endif
 	q->flags = 0;
 	q->task = p;
+	q->func = NULL;
+#if WAITQUEUE_DEBUG
+	q->__magic = (long)&q->__magic;
+#endif
+}
+
+static inline void init_waitqueue_func_entry(wait_queue_t *q,
+					wait_queue_func_t func)
+{
+#if WAITQUEUE_DEBUG
+	if (!q || !func)
+		WQ_BUG();
+#endif
+	q->flags = 0;
+	q->task = NULL;
+	q->func = func;
 #if WAITQUEUE_DEBUG
 	q->__magic = (long)&q->__magic;
 #endif
@@ -231,6 +251,22 @@
 	list_del(&old->task_list);
 }
 
+#define add_wait_queue_cond(q, wait, cond) \
+	({							\
+		unsigned long flags;				\
+		int _raced = 0;					\
+		wq_write_lock_irqsave(&(q)->lock, flags);	\
+		(wait)->flags = 0;				\
+		__add_wait_queue((q), (wait));			\
+		rmb();						\
+		if (!(cond)) {					\
+			_raced = 1;				\
+			__remove_wait_queue((q), (wait));	\
+		}						\
+		wq_write_unlock_irqrestore(&(q)->lock, flags);	\
+		_raced;						\
+	})
+
 #endif /* __KERNEL__ */
 
 #endif
diff -urN v2.4.19-pre5/include/linux/worktodo.h linux.diff/include/linux/worktodo.h
--- v2.4.19-pre5/include/linux/worktodo.h	Wed Dec 31 19:00:00 1969
+++ linux.diff/include/linux/worktodo.h	Tue May  7 17:15:39 2002
@@ -0,0 +1,75 @@
+/*
+ *	Written by Benjamin LaHaise.
+ *
+ *	Copyright 2000-2001 Red Hat, Inc.
+ *
+ *	#include "gpl.h"
+ *
+ *	Basic design idea from Jeff Merkey.
+ *	Stack based on ideas from Ingo Molnar.
+ */
+#ifndef __LINUX__WORKTODO_H
+#define __LINUX__WORKTODO_H
+
+#ifndef _LINUX_WAIT_H
+#include <linux/wait.h>
+#endif
+#ifndef _LINUX_TQUEUE_H
+#include <linux/tqueue.h>
+#endif
+
+struct wtd_stack {
+	void	(*fn)(void *data);
+	void	*data;
+};
+
+struct worktodo {
+	wait_queue_t		wait;
+	struct tq_struct	tq;
+
+	void			*data;	/* for use by the wtd_ primitives */
+
+	int			sp;
+	struct wtd_stack	stack[3];
+};
+
+/* FIXME NOTE: factor from kernel/context.c */
+#define wtd_init(wtd, routine) do {			\
+	INIT_TQUEUE(&(wtd)->tq, (routine), (wtd));	\
+	(wtd)->data = 0;				\
+	(wtd)->sp = 0;					\
+} while (0)
+
+#define wtd_queue(wtd)	schedule_task(&(wtd)->tq)
+
+#define wtd_push(wtd, action, wtddata)			\
+do {							\
+	(wtd)->stack[(wtd)->sp].fn = (wtd)->tq.routine;	\
+	(wtd)->stack[(wtd)->sp++].data = (wtd)->tq.data;\
+	(wtd)->tq.routine = action;			\
+	(wtd)->tq.data = wtddata;			\
+} while (0)
+
+static inline void wtd_pop(struct worktodo *wtd)
+{
+	if (wtd->sp) {
+		wtd->sp--;
+		wtd->tq.routine = wtd->stack[wtd->sp].fn;
+		wtd->tq.data = wtd->stack[wtd->sp].data;
+	}
+}
+
+#define wtd_set_action(wtd, action, wtddata)	INIT_TQUEUE(&(wtd)->tq, action, wtddata)
+
+struct page;
+struct buffer_head;
+extern int wtd_lock_page(struct worktodo *wtd, struct page *page);
+extern int wtd_wait_on_buffer(struct worktodo *wtd, struct buffer_head *bh);
+
+#if 0	/* not implemented yet */
+extern void wtd_down(struct worktodo *wtd, struct semaphore *sem);
+extern void wtd_down_write(struct worktodo *wtd, struct rw_semaphore *sem);
+extern void wtd_down_read(struct worktodo *wtd, struct rw_semaphore *sem);
+#endif
+
+#endif /* __LINUX__WORKTODO_H */
diff -urN v2.4.19-pre5/include/net/sock.h linux.diff/include/net/sock.h
--- v2.4.19-pre5/include/net/sock.h	Wed Apr  3 21:12:56 2002
+++ linux.diff/include/net/sock.h	Tue May 14 12:55:01 2002
@@ -105,7 +105,18 @@
 
 #include <asm/atomic.h>
 #include <net/dst.h>
+#include <linux/fs.h>
+#include <linux/file.h>
+#include <linux/fcblist.h>
 
+#include <linux/aio.h>
+
+
+struct sock_iocb {
+	struct list_head	list;
+	kvec_cb_t		cb;
+	struct kvec_dst		dst;
+};
 
 /* The AF_UNIX specific socket options */
 struct unix_opt {
@@ -283,9 +294,9 @@
 	/* Data for direct copy to user */
 	struct {
 		struct sk_buff_head	prequeue;
-		int			memory;
 		struct task_struct	*task;
 		struct iovec		*iov;
+		int			memory;
 		int			len;
 	} ucopy;
 
@@ -560,6 +571,9 @@
 		struct sk_buff *tail;
 	} backlog;
 
+	struct list_head	kvec_read_list;
+	struct list_head	kvec_write_list;
+
 	rwlock_t		callback_lock;
 
 	/* Error queue, rarely used. */
@@ -721,6 +735,8 @@
 	int			(*recvmsg)(struct sock *sk, struct msghdr *msg,
 					int len, int noblock, int flags, 
 					int *addr_len);
+	int		(*kvec_read)(struct sock *, kvec_cb_t cb, int len);
+	int		(*kvec_write)(struct sock *, kvec_cb_t cb, int len);
 	int			(*bind)(struct sock *sk, 
 					struct sockaddr *uaddr, int addr_len);
 
@@ -795,7 +811,7 @@
 	if ((__sk)->backlog.tail != NULL) \
 		__release_sock(__sk); \
 	(__sk)->lock.users = 0; \
-        if (waitqueue_active(&((__sk)->lock.wq))) wake_up(&((__sk)->lock.wq)); \
+        wake_up(&((__sk)->lock.wq)); \
 	spin_unlock_bh(&((__sk)->lock.slock)); \
 } while(0)
 
@@ -1215,8 +1231,13 @@
 
 static inline void sk_wake_async(struct sock *sk, int how, int band)
 {
-	if (sk->socket && sk->socket->fasync_list)
+	if (sk->socket) {
+		if (sk->socket->file)
+			file_send_notify(sk->socket->file, ion_band_table[band - POLL_IN],
+					poll_band_table[band - POLL_IN]);
+		if (sk->socket->fasync_list)
 		sock_wake_async(sk->socket, how, band);
+	}
 }
 
 #define SOCK_MIN_SNDBUF 2048
diff -urN v2.4.19-pre5/include/net/tcp.h linux.diff/include/net/tcp.h
--- v2.4.19-pre5/include/net/tcp.h	Wed Apr  3 21:12:57 2002
+++ linux.diff/include/net/tcp.h	Tue May 14 12:56:37 2002
@@ -732,6 +732,8 @@
 					    struct msghdr *msg,
 					    int len, int nonblock, 
 					    int flags, int *addr_len);
+extern int tcp_kvec_read(struct sock *sk, kvec_cb_t cb, int len);
+extern int tcp_kvec_write(struct sock *sk, kvec_cb_t cb, int len);
 
 extern int			tcp_listen_start(struct sock *sk);
 
diff -urN v2.4.19-pre5/kernel/fork.c linux.diff/kernel/fork.c
--- v2.4.19-pre5/kernel/fork.c	Wed Apr  3 21:04:41 2002
+++ linux.diff/kernel/fork.c	Mon Apr 29 15:54:22 2002
@@ -46,6 +46,16 @@
 	wq_write_unlock_irqrestore(&q->lock, flags);
 }
 
+void add_wait_queue_exclusive_lifo(wait_queue_head_t *q, wait_queue_t * wait)
+{
+	unsigned long flags;
+
+	wq_write_lock_irqsave(&q->lock, flags);
+	wait->flags = WQ_FLAG_EXCLUSIVE;
+	__add_wait_queue(q, wait);
+	wq_write_unlock_irqrestore(&q->lock, flags);
+}
+
 void add_wait_queue_exclusive(wait_queue_head_t *q, wait_queue_t * wait)
 {
 	unsigned long flags;
@@ -216,6 +226,7 @@
 
 static struct mm_struct * mm_init(struct mm_struct * mm)
 {
+	mm->ioctx_list = NULL;
 	atomic_set(&mm->mm_users, 1);
 	atomic_set(&mm->mm_count, 1);
 	init_rwsem(&mm->mmap_sem);
@@ -251,6 +262,9 @@
  */
 inline void __mmdrop(struct mm_struct *mm)
 {
+	if (mm->ioctx_list)
+		BUG();
+
 	if (mm == &init_mm) BUG();
 	pgd_free(mm->pgd);
 	destroy_context(mm);
@@ -269,6 +283,7 @@
 		list_del(&mm->mmlist);
 		mmlist_nr--;
 		spin_unlock(&mmlist_lock);
+		exit_aio(mm);
 		exit_mmap(mm);
 		mmdrop(mm);
 	}
diff -urN v2.4.19-pre5/kernel/sched.c linux.diff/kernel/sched.c
--- v2.4.19-pre5/kernel/sched.c	Tue Jan  1 14:09:35 2002
+++ linux.diff/kernel/sched.c	Tue Apr  2 18:56:57 2002
@@ -705,33 +705,44 @@
 }
 
 /*
- * The core wakeup function.  Non-exclusive wakeups (nr_exclusive == 0) just wake everything
- * up.  If it's an exclusive wakeup (nr_exclusive == small +ve number) then we wake all the
- * non-exclusive tasks and one exclusive task.
+ * The core wakeup function.  Non-exclusive wakeups (nr_exclusive == 0) just
+ * wake everything up.  If it's an exclusive wakeup (nr_exclusive == small
+ * +ve number) then we wake all the non-exclusive tasks and one exclusive task.
  *
  * There are circumstances in which we can try to wake a task which has already
- * started to run but is not in state TASK_RUNNING.  try_to_wake_up() returns zero
- * in this (rare) case, and we handle it by contonuing to scan the queue.
+ * started to run but is not in state TASK_RUNNING.  try_to_wake_up() returns
+ * zero in this (rare) case, and we handle it by continuing to scan the queue.
  */
 static inline void __wake_up_common (wait_queue_head_t *q, unsigned int mode,
 			 	     int nr_exclusive, const int sync)
 {
-	struct list_head *tmp;
+	struct list_head *tmp, *next;
 	struct task_struct *p;
 
 	CHECK_MAGIC_WQHEAD(q);
 	WQ_CHECK_LIST_HEAD(&q->task_list);
 	
-	list_for_each(tmp,&q->task_list) {
+	list_for_each_safe(tmp, next, &q->task_list) {
 		unsigned int state;
-                wait_queue_t *curr = list_entry(tmp, wait_queue_t, task_list);
+		wait_queue_t *curr = list_entry(tmp, wait_queue_t, task_list);
+		wait_queue_func_t func;
 
 		CHECK_MAGIC(curr->__magic);
+		func = curr->func;
+		if (func) {
+			unsigned flags = curr->flags;
+			func(curr);
+			if ((flags & WQ_FLAG_EXCLUSIVE) && !--nr_exclusive)
+				break;
+			continue;
+		}
 		p = curr->task;
 		state = p->state;
 		if (state & mode) {
 			WQ_NOTE_WAKER(curr);
-			if (try_to_wake_up(p, sync) && (curr->flags&WQ_FLAG_EXCLUSIVE) && !--nr_exclusive)
+			if (try_to_wake_up(p, sync) &&
+			    (curr->flags & WQ_FLAG_EXCLUSIVE) &&
+			    !--nr_exclusive)
 				break;
 		}
 	}
diff -urN v2.4.19-pre5/kernel/sysctl.c linux.diff/kernel/sysctl.c
--- v2.4.19-pre5/kernel/sysctl.c	Wed Apr  3 21:04:41 2002
+++ linux.diff/kernel/sysctl.c	Mon Apr 29 17:58:53 2002
@@ -30,6 +30,7 @@
 #include <linux/init.h>
 #include <linux/sysrq.h>
 #include <linux/highuid.h>
+#include <linux/aio.h>
 
 #include <asm/uaccess.h>
 
@@ -284,6 +285,8 @@
 	{0}
 };
 
+extern int user_pinned_pages;
+
 static ctl_table fs_table[] = {
 	{FS_NRINODE, "inode-nr", &inodes_stat, 2*sizeof(int),
 	 0444, NULL, &proc_dointvec},
@@ -309,6 +312,16 @@
 	 sizeof(int), 0644, NULL, &proc_dointvec},
 	{FS_LEASE_TIME, "lease-break-time", &lease_break_time, sizeof(int),
 	 0644, NULL, &proc_dointvec},
+	{FS_AIO_NR, "aio-nr", &aio_nr, sizeof(aio_nr),
+	 0444, NULL, &proc_dointvec},
+	{FS_AIO_MAX_NR, "aio-max-nr", &aio_max_nr, sizeof(aio_max_nr),
+	 0644, NULL, &proc_dointvec},
+	{FS_AIO_MAX_SIZE, "aio-max-size", &aio_max_size, sizeof(aio_max_size),
+	 0644, NULL, &proc_dointvec},
+	{FS_AIO_MAX_PINNED, "aio-max-pinned", &aio_max_pinned, sizeof(aio_max_pinned),
+	 0644, NULL, &proc_dointvec},
+	{FS_AIO_MAX_PINNED+1, "aio-pinned", &user_pinned_pages, sizeof(int),
+	 0644, NULL, &proc_dointvec},
 	{0}
 };
 
diff -urN v2.4.19-pre5/mm/Makefile linux.diff/mm/Makefile
--- v2.4.19-pre5/mm/Makefile	Wed Apr  3 21:04:41 2002
+++ linux.diff/mm/Makefile	Tue Apr  2 18:56:57 2002
@@ -17,5 +17,6 @@
 	    shmem.o
 
 obj-$(CONFIG_HIGHMEM) += highmem.o
+obj-y += wtd.o
 
 include $(TOPDIR)/Rules.make
diff -urN v2.4.19-pre5/mm/filemap.c linux.diff/mm/filemap.c
--- v2.4.19-pre5/mm/filemap.c	Wed Apr  3 21:04:41 2002
+++ linux.diff/mm/filemap.c	Mon Apr 29 18:30:59 2002
@@ -29,6 +29,8 @@
 #include <asm/mman.h>
 
 #include <linux/highmem.h>
+#include <linux/worktodo.h>
+#include <linux/iobuf.h>
 
 /*
  * Shared mappings implemented 30.11.1994. It's not fully working yet,
@@ -774,7 +776,7 @@
  * at a cost of "thundering herd" phenomena during rare hash
  * collisions.
  */
-static inline wait_queue_head_t *page_waitqueue(struct page *page)
+static inline wait_queue_head_t *__page_waitqueue(struct page *page)
 {
 	const zone_t *zone = page_zone(page);
 	wait_queue_head_t *wait = zone->wait_table;
@@ -805,6 +807,13 @@
 	return &wait[hash];
 }
 
+wait_queue_head_t *page_waitqueue(struct page *page)
+{
+	return __page_waitqueue(page);
+}
+
+#define page_waitqueue(page) __page_waitqueue(page)
+
 /* 
  * Wait for a page to get unlocked.
  *
@@ -1185,7 +1194,7 @@
 
 static void generic_file_readahead(int reada_ok,
 	struct file * filp, struct inode * inode,
-	struct page * page)
+	struct page * page, int flags)
 {
 	unsigned long end_index;
 	unsigned long index = page->index;
@@ -1315,7 +1324,7 @@
  * This is really ugly. But the goto's actually try to clarify some
  * of the logic when it comes to error handling etc.
  */
-void do_generic_file_read(struct file * filp, loff_t *ppos, read_descriptor_t * desc, read_actor_t actor)
+void do_generic_file_read(struct file * filp, loff_t *ppos, read_descriptor_t * desc, read_actor_t actor, int flags)
 {
 	struct address_space *mapping = filp->f_dentry->d_inode->i_mapping;
 	struct inode *inode = mapping->host;
@@ -1324,10 +1333,17 @@
 	int reada_ok;
 	int error;
 	int max_readahead = get_max_readahead(inode);
+	loff_t pos;
+
+	pos = *ppos;
+	if (unlikely(pos < 0)) {
+		desc->error = -EINVAL;
+		return;
+	}
 
 	cached_page = NULL;
-	index = *ppos >> PAGE_CACHE_SHIFT;
-	offset = *ppos & ~PAGE_CACHE_MASK;
+	index = pos >> PAGE_CACHE_SHIFT;
+	offset = pos & ~PAGE_CACHE_MASK;
 
 /*
  * If the current position is outside the previous read-ahead window, 
@@ -1374,13 +1390,17 @@
 
 		end_index = inode->i_size >> PAGE_CACHE_SHIFT;
 			
-		if (index > end_index)
+		if (index > end_index) {
+			desc->error = 0;
 			break;
+		}
 		nr = PAGE_CACHE_SIZE;
 		if (index == end_index) {
 			nr = inode->i_size & ~PAGE_CACHE_MASK;
-			if (nr <= offset)
+			if (nr <= offset) {
+				desc->error = 0;
 				break;
+			}
 		}
 
 		nr = nr - offset;
@@ -1400,7 +1420,7 @@
 
 		if (!Page_Uptodate(page))
 			goto page_not_up_to_date;
-		generic_file_readahead(reada_ok, filp, inode, page);
+		generic_file_readahead(reada_ok, filp, inode, page, flags);
 page_ok:
 		/* If users can be writing to this page using arbitrary
 		 * virtual addresses, take care about potential aliasing
@@ -1440,13 +1460,23 @@
  * Ok, the page was not immediately readable, so let's try to read ahead while we're at it..
  */
 page_not_up_to_date:
-		generic_file_readahead(reada_ok, filp, inode, page);
+		generic_file_readahead(reada_ok, filp, inode, page, flags);
 
 		if (Page_Uptodate(page))
 			goto page_ok;
 
 		/* Get exclusive access to the page ... */
-		lock_page(page);
+		if (flags & F_ATOMIC) {
+			if (TryLockPage(page)) {
+				if (Page_Uptodate(page))
+					goto page_ok;
+				desc->error = -EWOULDBLOCKIO;
+				page_cache_release(page);
+				break;
+			}
+			pr_debug("page_not_up_to_date: atomic trylock succeeded\n");
+		} else
+			lock_page(page);
 
 		/* Did it get unhashed before we got the lock? */
 		if (!page->mapping) {
@@ -1470,11 +1500,12 @@
 				goto page_ok;
 
 			/* Again, try some read-ahead while waiting for the page to finish.. */
-			generic_file_readahead(reada_ok, filp, inode, page);
-			wait_on_page(page);
+			generic_file_readahead(reada_ok, filp, inode, page, flags);
+			if (!(flags & F_ATOMIC))
+				wait_on_page(page);
 			if (Page_Uptodate(page))
 				goto page_ok;
-			error = -EIO;
+			error = (flags & F_ATOMIC) ? -EWOULDBLOCKIO : -EIO;
 		}
 
 		/* UHHUH! A synchronous read error occurred. Report it */
@@ -1483,6 +1514,11 @@
 		break;
 
 no_cached_page:
+		if (flags & F_ATOMIC) {
+			spin_unlock(&pagecache_lock);
+			desc->error = -EWOULDBLOCKIO;
+			break;
+		}
 		/*
 		 * Ok, it wasn't cached, so we need to create a new
 		 * page..
@@ -1637,6 +1673,11 @@
  */
 ssize_t generic_file_read(struct file * filp, char * buf, size_t count, loff_t *ppos)
 {
+	return generic_file_new_read(filp, buf, count, ppos, 0);
+}
+
+ssize_t generic_file_new_read(struct file * filp, char * buf, size_t count, loff_t *ppos, int flags)
+{
 	ssize_t retval;
 
 	if ((ssize_t) count < 0)
@@ -1656,7 +1697,7 @@
 			desc.count = count;
 			desc.buf = buf;
 			desc.error = 0;
-			do_generic_file_read(filp, ppos, &desc, file_read_actor);
+			do_generic_file_read(filp, ppos, &desc, file_read_actor, flags);
 
 			retval = desc.written;
 			if (!retval)
@@ -1781,7 +1822,7 @@
 		desc.count = count;
 		desc.buf = (char *) out_file;
 		desc.error = 0;
-		do_generic_file_read(in_file, ppos, &desc, file_send_actor);
+		do_generic_file_read(in_file, ppos, &desc, file_send_actor, 0);
 
 		retval = desc.written;
 		if (!retval)
@@ -3177,3 +3218,654 @@
 		panic("Failed to allocate page hash table\n");
 	memset((void *)page_hash_table, 0, PAGE_HASH_SIZE * sizeof(struct page *));
 }
+
+/* address_space_map
+ *	Maps a series of pages from the page cache into the given array.
+ */
+static int address_space_map(struct address_space *as, unsigned long index,
+		int nr, struct page **pages,
+		int *nr_newp, struct page **new_pages)
+{
+	struct page *cached_page = NULL;
+	int nr_new = 0;
+	int ret;
+
+	if (unlikely(nr <= 0)) {
+		*nr_newp = nr_new;
+		return 0;
+	}
+
+	ret = 0;
+
+	spin_lock(&pagecache_lock);
+
+	while (nr > 0) {
+		struct page **hash = page_hash(as, index);
+		struct page *page;
+
+		page = __find_page_nolock(as, index, *hash);
+		if (page) {
+			page_cache_get(page);
+got_page:
+			pages[ret++] = page;
+			index++;
+			nr--;
+			continue;
+		}
+
+		if (cached_page) {
+			__add_to_page_cache(cached_page, as, index, hash);
+			nr_new++;
+			*new_pages++ = page = cached_page;
+			cached_page = NULL;
+			goto got_page;
+		}
+		spin_unlock(&pagecache_lock);
+
+		cached_page = page_cache_alloc(as);
+		if (!cached_page)
+			goto out;
+
+		/* Okay, we now have an allocated page.  Retry
+		 * the search and add. */
+		spin_lock(&pagecache_lock);
+	}
+
+	spin_unlock(&pagecache_lock);
+
+out:
+	if (cached_page)
+		page_cache_release(cached_page);
+
+	*nr_newp = nr_new;
+	return ret ? ret : -ENOMEM;
+}
+
+struct iodesc {
+	struct worktodo	wtd;
+
+	struct page	*good_page;	/* the highest Uptodate page */
+	int		good_idx;
+	int		err;
+	int		did_read;
+	int		rw;
+
+	struct page	**pages;
+	struct page	**new_pages;
+	struct page	**cur_pagep;
+	int		nr_pages;
+	int		nr_new_pages;
+
+	struct address_space *as;
+	struct file	*file;
+	kvec_cb_t	cb;
+
+	size_t		size;
+	unsigned long	transferred;
+	unsigned	offset;
+	struct kveclet	*veclet;
+
+	struct kvec_dst	src;
+
+	int		sync;
+
+#define READDESC_NR_DEF	3
+	struct page *def_pages[READDESC_NR_DEF];
+	struct page *def_new_pages[READDESC_NR_DEF];
+};
+
+static void __iodesc_free(struct iodesc *io, int unlock)
+{
+	kvec_cb_t cb;
+	ssize_t res;
+
+	if (unlock) {
+		unsigned i;
+		for (i=0; i<io->nr_pages; i++) {
+			struct page *page = io->pages[i];
+			UnlockPage(page);
+			page_cache_release(page);
+		}
+	} else {
+		unsigned i;
+		for (i=0; i<io->nr_pages; i++)
+			page_cache_release(io->pages[i]);
+	}
+
+	if (io->new_pages != io->def_new_pages)
+		kfree(io->new_pages);
+	if (io->pages != io->def_pages)
+		kfree(io->pages);
+
+	cb = io->cb;
+	res = io->transferred ? io->transferred : io->err;
+	kfree(io);
+
+	cb.fn(cb.data, cb.vec, res);
+}
+
+/* By the time this function is called, all of the pages prior to
+ * the current good_idx have been released appropriately.  The remaining
+ * duties are to release any remaining pages and to honour O_SYNC.
+ */
+static void __iodesc_finish_write(struct iodesc *io)
+{
+	pr_debug("__iodesc_finish_write(%p)\n", io);
+
+	__iodesc_free(io, WRITE == io->rw);
+}
+
+/* This is mostly ripped from generic_file_write */
+static int __iodesc_write_page(struct iodesc *io, struct page *page)
+{
+	char *kaddr = kmap(page);
+	unsigned long bytes;
+	unsigned long offset;
+	long status;
+	int done = 0;
+
+	offset = io->offset;
+	kaddr += offset;
+
+	bytes = PAGE_CACHE_SIZE - offset;
+	if (io->size < bytes)
+		bytes = io->size;
+
+	pr_debug("__iodesc_write_page(%p (%lu), %lu %lu)\n", page, page->index, offset, bytes);
+
+	io->err = io->as->a_ops->prepare_write(io->file, page,
+						offset, offset + bytes);
+	if (unlikely(io->err)) {
+		pr_debug("prepare_write: %d\n", io->err);
+		kunmap(page);
+		return 1;
+	}
+
+	kvec_dst_map(&io->src);
+	memcpy_from_kvec_dst(kaddr, &io->src, bytes);
+	kvec_dst_unmap(&io->src);	/* commit_write may block */
+
+	flush_dcache_page(page);
+	status = io->as->a_ops->commit_write(io->file, page,
+						offset, offset+bytes);
+
+	/* We don't handle short writes */
+	if (status > 0 && status != bytes)
+		done = 1;
+
+	if (!status)
+		status = bytes;
+
+	if (likely(status > 0)) {
+		io->transferred += status;
+		io->size -= status;
+		io->offset = (offset + status) & (PAGE_CACHE_SIZE - 1);
+
+		if (io->offset)
+			done = 1;
+	} else {
+		io->err = status;
+		done = 1;
+	}
+
+	kunmap(page);
+	return done;
+}
+
+void __iodesc_sync_wait_page(void *data)
+{
+	struct iodesc *io = data;
+
+	do {
+		struct buffer_head *bh, *head = io->pages[io->good_idx]->buffers;
+
+		if (!head)
+			continue;
+
+		bh = head;
+		do {
+			if (buffer_locked(bh)) {
+				pr_debug("waiting on bh=%p io=%p\n", bh, io);
+				if (!wtd_wait_on_buffer(&io->wtd, bh))
+					return;
+			}
+			if (buffer_req(bh) && !buffer_uptodate(bh)) {
+				pr_debug("io err bh=%p (%p)\n", bh, io);
+				io->err = -EIO;
+				break;
+			}
+		} while ((bh = bh->b_this_page) != head);
+	} while (!io->err && ++io->good_idx < io->nr_pages) ;
+
+	pr_debug("finish_write(%p)\n", io);
+	__iodesc_finish_write(io);
+}
+
+static void __iodesc_do_write(void *data)
+{
+	struct iodesc *io = data;
+	unsigned i;
+
+	for (i=0; i<io->nr_pages; i++) {
+		if (__iodesc_write_page(io, io->pages[i]))
+			break;
+	}
+
+	up(&io->file->f_dentry->d_inode->i_sem);
+
+	if (io->sync) {
+		io->good_idx = 0;
+
+		pr_debug("writing out pages(%p)\n", io);
+		for (i=0; i<io->nr_pages; i++) {
+			if (io->pages[i]->buffers)
+				writeout_one_page(io->pages[i]);
+		}
+
+		pr_debug("calling __iodesc_sync_wait_page(%p)\n", io);
+		wtd_set_action(&io->wtd, __iodesc_sync_wait_page, io);
+		__iodesc_sync_wait_page(io);
+		return;
+	}
+
+	__iodesc_finish_write(io);
+}
+
+static void __iodesc_write_lock_next_page(void *data)
+{
+	struct iodesc *io = data;
+	pr_debug("__iodesc_write_next_page(%p)\n", io);
+
+	while (io->good_idx < io->nr_pages) {
+		io->good_page = io->pages[io->good_idx++];
+		if (io->good_page == *io->cur_pagep)
+			io->cur_pagep++;
+		else {
+			if (!wtd_lock_page(&io->wtd, io->good_page))
+				return;
+		}
+	}
+
+	//Is this faster? __iodesc_do_write(io);
+	wtd_set_action(&io->wtd, __iodesc_do_write, io);
+	wtd_queue(&io->wtd);
+}
+
+static void __generic_file_write_iodesc(struct iodesc *io)
+{
+	struct inode *inode = io->file->f_dentry->d_inode;
+	time_t now = CURRENT_TIME;
+
+	remove_suid(inode);
+	if (inode->i_ctime != now || inode->i_mtime != now) {
+		inode->i_ctime = inode->i_mtime = now;
+		mark_inode_dirty_sync(inode);
+	}
+
+	wtd_set_action(&io->wtd, __iodesc_write_lock_next_page, io);
+	io->sync = !!(io->file->f_flags & O_SYNC);
+	io->good_idx = 0;
+	io->cur_pagep = io->new_pages;
+	__iodesc_write_lock_next_page(io);
+}
+
+static void __iodesc_read_finish(struct iodesc *io)
+{
+	struct page **src_pagep;
+	char *dst_addr, *src_addr;
+	int src_off;
+	size_t size;
+	size_t valid;
+
+	struct kveclet *veclet = io->veclet;
+	struct page *dst_page = veclet->page;
+	int dst_len = veclet->length;
+	int dst_off = veclet->offset;
+
+
+	pr_debug("__iodesc_read_finish: good_idx = %d\n", io->good_idx);
+	if (io->good_idx <= 0)
+		goto no_data;
+
+	size = io->size;
+	src_off = io->offset;
+	src_pagep = io->pages;
+	src_addr = kmap(*src_pagep);
+
+	valid = (size_t)io->good_idx << PAGE_CACHE_SHIFT;
+	valid -= src_off;
+	pr_debug("size=%d valid=%d src_off=%d\n", size, valid, src_off);
+
+	if (valid < size)
+		size = valid;
+
+	dst_addr = kmap(veclet->page);
+
+	while (size > 0) {
+		int this = PAGE_CACHE_SIZE - src_off;
+		if ((PAGE_SIZE - dst_off) < this)
+			this = PAGE_SIZE - dst_off;
+		if (size < this)
+			this = size;
+		pr_debug("this=%d src_off=%d dst_off=%d dst_len=%d\n",
+			this, src_off, dst_off, dst_len);
+		memcpy(dst_addr + dst_off, src_addr + src_off, this);
+
+		src_off += this;
+		dst_off += this;
+		dst_len -= this;
+		size -= this;
+		io->transferred += this;
+		pr_debug("read_finish: this=%d transferred=%d\n",
+			 this, io->transferred);
+
+		if (size <= 0)
+			break;
+
+		if (dst_len <= 0) {
+			kunmap(dst_page);
+			veclet++;
+			dst_page = veclet->page;
+			dst_off = veclet->offset;
+			dst_len = veclet->length;
+			dst_addr = kmap(dst_page);
+		}
+
+		if (src_off >= PAGE_SIZE) { /* FIXME: PAGE_CACHE_SIZE */
+			kunmap(*src_pagep);
+			pr_debug("page(%lu)->count = %d\n",
+				 (*src_pagep)->index,
+				 atomic_read(&(*src_pagep)->count));
+			src_pagep++;
+			src_addr = kmap(*src_pagep);
+			src_off = 0;
+		}
+	}
+	kunmap(dst_page);
+	kunmap(*src_pagep);
+no_data:
+	__iodesc_free(io, 0);
+}
+
+static void __iodesc_make_uptodate(void *data)
+{
+	struct iodesc *io = data;
+	struct page *page = io->good_page;
+	int locked = 1;
+
+	pr_debug("__iodesc_make_uptodate: io=%p index=%lu\n", io, page->index);
+again:
+	while (Page_Uptodate(page)) {
+		pr_debug("page index %lu uptodate\n", page->index);
+		if (locked) {
+			UnlockPage(page);
+			locked = 0;
+		}
+		io->did_read = 0;
+		io->good_idx++;
+		if (io->good_idx >= io->nr_pages) {
+			__iodesc_read_finish(io);
+			return;
+		}
+		page = io->good_page = io->pages[io->good_idx];
+		pr_debug("__iodesc_make_uptodate: index=%lu\n", page->index);
+	}
+
+	if (!locked) {
+		if (!wtd_lock_page(&io->wtd, page))
+			return;
+		locked = 1;
+	}
+
+	if (!io->did_read) {
+		/* We haven't tried reading this page before, give it a go. */
+		pr_debug("attempting to read %lu\n", page->index);
+		io->did_read = 1;
+		locked = 0;
+		io->err = page->mapping->a_ops->readpage(io->file, page);
+		if (!io->err) {
+			if (Page_Uptodate(page))
+				goto again;
+			if (wtd_lock_page(&io->wtd, page)) {
+				locked = 1;
+				goto again;
+			}
+			return;
+		}
+	}
+
+	if (locked)
+		UnlockPage(page);
+
+	/* We've already read this page before.  Set err to EIO and quit */
+	if (!io->err)
+		io->err = -EIO;
+	__iodesc_read_finish(io);
+}
+
+static void __wtdgeneric_file_read_iodesc(void *data);
+
+static void __generic_file_read_iodesc(struct iodesc *io, int mayblock)
+{
+	int (*readpage)(struct file *, struct page *);
+	int i;
+
+	wtd_set_action(&io->wtd, __iodesc_make_uptodate, io);
+	readpage = io->as->a_ops->readpage;
+	for (i=0; i<io->nr_new_pages; i++) {
+		int ret;
+		if (!mayblock) {
+			wtd_set_action(&io->wtd, __wtdgeneric_file_read_iodesc, io);
+			wtd_queue(&io->wtd);
+			return;
+		}
+		ret = readpage(io->file, io->new_pages[i]);
+		if (ret)
+			printk(KERN_DEBUG "__generic_file_read_kiovec: readpage(%lu) = %d\n", io->new_pages[i]->index, ret);
+	}
+
+	for (i=0; i<io->nr_pages; i++) {
+		struct page *page = io->pages[i];
+		if (Page_Uptodate(page)) {
+			pr_debug("__generic_file_read_iodesc: %lu is uptodate\n", page->index);
+			continue;
+		}
+
+		if (!mayblock) {
+			wtd_set_action(&io->wtd, __wtdgeneric_file_read_iodesc, io);
+			wtd_queue(&io->wtd);
+			return;
+		}
+		if (!TryLockPage(page)) {
+			int ret = readpage(io->file, page);
+			if (ret)
+				printk(KERN_DEBUG "__generic_file_read_iodesc: readpage(%lu): %d\n", page->index, ret);
+		}
+
+		if (!Page_Uptodate(page) && io->good_idx == -1) {
+			pr_debug("first good_idx=%d (%lu)\n", i, page->index);
+			io->good_idx = i;
+			io->good_page = page;
+		}
+	}
+
+	/* Whee, all the pages are uptodate! */
+	if (!io->good_page) {
+		pr_debug("all pages uptodate!\n");
+		io->good_idx = io->nr_pages;
+		__iodesc_read_finish(io);
+		return;
+	}
+
+	pr_debug("locking good_page\n");
+	if (wtd_lock_page(&io->wtd, io->good_page))
+		__iodesc_make_uptodate(io);
+	return;
+}
+
+static void __wtdgeneric_file_read_iodesc(void *data)
+{
+	struct iodesc *io = data;
+	__generic_file_read_iodesc(io, 1);
+}
+
+static int generic_file_rw_kvec(struct file *file, int rw, kvec_cb_t cb,
+			 size_t size, loff_t pos);
+
+int generic_file_kvec_read(struct file *file, kvec_cb_t cb, size_t size, loff_t pos)
+{
+	return generic_file_rw_kvec(file, READ, cb, size, pos);
+}
+
+int generic_file_kvec_write(struct file *file, kvec_cb_t cb, size_t size, loff_t pos)
+{
+	return generic_file_rw_kvec(file, WRITE, cb, size, pos);
+}
+
+int generic_file_rw_kvec(struct file *file, int rw, kvec_cb_t cb,
+			 size_t size, loff_t pos)
+{
+	struct inode *inode = file->f_dentry->d_inode;
+	struct address_space *as = inode->i_mapping;
+	unsigned long index;
+	unsigned long eindex;
+	unsigned long nr_pages;
+	struct iodesc *io = NULL;
+	int ret;
+	int append = 0;
+
+	ret = -EINVAL;
+	if (unlikely(rw != READ && rw != WRITE))
+		goto out;
+
+	append = unlikely(0 != (file->f_flags & O_APPEND));
+
+	/* Don't check pos when appending, but otherwise do sanity 
+	 * checks before allocating memory.  -'ve offsets are invalid.
+	 */
+	if (unlikely(!append && pos < 0))
+		goto out;
+
+	ret = -ENOMEM;
+	io = kmalloc(sizeof(*io), GFP_KERNEL);
+	if (!io)
+		goto out;
+
+	memset(io, 0, sizeof(*io));
+	io->size = size;
+
+	/* FIXME: make the down a WTD_op */
+	if (rw == WRITE) {
+		unsigned long long tmp;
+		loff_t limit;
+
+		down(&inode->i_sem);
+		if (append)
+			pos = inode->i_size;
+
+		limit = current->rlim[RLIMIT_FSIZE].rlim_cur;
+		if (likely(RLIM_INFINITY == limit))
+			limit = OFFSET_MAX;
+
+		/* Filesystem limits take precedence over user limits */
+		if (likely(inode->i_sb->s_maxbytes < limit))
+			limit = inode->i_sb->s_maxbytes;
+
+	        if (unlikely(pos >= limit)) {
+			pr_debug("maxbytes: %Ld\n", limit);
+			ret = 0;
+			if (size || pos > limit)
+				ret = -EFBIG;
+			goto out_io;
+		}
+
+		/* Clamp writes straddling limit. */
+		tmp = pos + size;
+		if (unlikely(tmp > (unsigned long long)limit))
+			size = limit - pos;
+	}
+
+	if (READ == rw) {
+		pr_debug("pos=%Ld i_size=%Ld\n", pos, inode->i_size);
+
+		if (pos > inode->i_size)
+			size = 0;
+		else if ((pos + size) > inode->i_size)
+			size = inode->i_size - pos;
+
+		if (io->size < size)
+			size = io->size;
+		else if (size < io->size)
+			io->size = size;
+
+		pr_debug("io->size=%d size=%d\n", io->size, size);
+	}
+
+	ret = 0;
+	if (unlikely(!size))
+		goto out_io;
+
+	index = pos >> PAGE_CACHE_SHIFT;
+	eindex = (pos + size - 1) >> PAGE_CACHE_SHIFT;
+	nr_pages = eindex - index + 1;
+
+	pr_debug("nr_pages: %lu\n", nr_pages);
+
+	io->good_idx = -1;
+	io->good_page = NULL;
+	io->did_read = 0;
+	io->err = 0;
+	io->rw = rw;
+	io->as = as;
+	io->offset = (unsigned long)pos & (PAGE_CACHE_SIZE - 1);
+	io->file = file;
+	io->cb = cb;
+	kvec_dst_init(&io->src, KM_USER0);
+	kvec_dst_set(&io->src, cb.vec->veclet);
+	io->veclet = cb.vec->veclet;
+	if (nr_pages < READDESC_NR_DEF) {
+		io->pages = io->def_pages;
+		io->new_pages = io->def_new_pages;
+	} else {
+		io->pages = kmalloc(sizeof(*io->pages) * (nr_pages + 1), GFP_KERNEL);
+		if (!io->pages)
+			goto out_io;
+
+		io->new_pages = kmalloc(sizeof(*io->new_pages) * (nr_pages + 1), GFP_KERNEL);
+		if (!io->new_pages)
+			goto out_pages;
+	}
+
+	ret = address_space_map(as, index, nr_pages, io->pages,
+			&io->nr_new_pages, io->new_pages);
+	pr_debug("as_map: %d (%d new)\n", ret, io->nr_new_pages);
+	if (ret <= 0)
+		goto out_new_pages;
+
+	io->nr_pages = ret;
+	io->pages[io->nr_pages] = NULL;
+	io->new_pages[io->nr_new_pages] = NULL;
+
+	if (rw == READ)
+		__generic_file_read_iodesc(io, 0);
+	else if (rw == WRITE)
+		__generic_file_write_iodesc(io);
+
+	return 0;
+
+out_new_pages:
+	if (io->new_pages != io->def_new_pages)
+		kfree(io->new_pages);
+out_pages:
+	if (io->pages != io->def_pages)
+		kfree(io->pages);
+out_io:
+	kfree(io);
+
+	if (rw == WRITE)
+		up(&inode->i_sem);
+out:
+	if (!ret)
+		cb.fn(cb.data, cb.vec, ret);
+	return ret;
+}
diff -urN v2.4.19-pre5/mm/memory.c linux.diff/mm/memory.c
--- v2.4.19-pre5/mm/memory.c	Wed Apr  3 21:04:41 2002
+++ linux.diff/mm/memory.c	Mon May 13 01:27:05 2002
@@ -45,6 +45,8 @@
 #include <linux/highmem.h>
 #include <linux/pagemap.h>
 #include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/compiler.h>
 
 #include <asm/pgalloc.h>
 #include <asm/uaccess.h>
@@ -1492,3 +1494,229 @@
 	}
 	return page;
 }
+
+/*
+ * Force in an entire range of pages from the current process's user VA,
+ * and pin them in physical memory.  
+ * FIXME: some architectures need to flush the cache based on user addresses 
+ * here.  Someone please provide a better macro than flush_cache_page.
+ */
+
+#define dprintk(x...)
+atomic_t user_pinned_pages = ATOMIC_INIT(0);
+
+struct kvec *map_user_kvec(int rw, unsigned long ptr, size_t len)
+{
+	return mm_map_user_kvec(current->mm, rw, ptr, len);
+}
+
+struct kvec *mm_map_user_kvec(struct mm_struct *mm, int rw, unsigned long ptr,
+			      size_t len)
+{
+	struct kvec		*vec;
+	struct kveclet		*veclet;
+	unsigned long		end;
+	int			err;
+	struct vm_area_struct *	vma = 0;
+	int			i;
+	int			datain = (rw == READ);
+	unsigned		nr_pages;
+
+	end = ptr + len;
+	if (end < ptr)
+		return ERR_PTR(-EINVAL);
+
+	nr_pages = (ptr + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
+	nr_pages -= ptr >> PAGE_SHIFT;
+	nr_pages ++;
+
+	atomic_add(nr_pages, &user_pinned_pages);
+	err = -EAGAIN;
+	if (unlikely(atomic_read(&user_pinned_pages) >= aio_max_pinned))
+		goto out_adjust;
+
+	vec = kmalloc(sizeof(struct kvec) + nr_pages * sizeof(struct kveclet),
+			GFP_KERNEL);
+	err = -ENOMEM;
+	if (unlikely(!vec))
+		goto out_adjust;
+
+	vec->nr = 0;
+	vec->max_nr = nr_pages;
+	veclet = vec->veclet;
+	
+	/* Fault in and pin each page covering the user's [ptr, ptr+len) range. */
+	dprintk ("map_user_kiobuf: begin\n");
+	
+	down_read(&mm->mmap_sem);
+
+	err = -EFAULT;
+	
+	i = 0;
+
+	/* 
+	 * First of all, try to fault in all of the necessary pages
+	 */
+	while (ptr < end) {
+		struct page *map;
+		veclet->offset = ptr & ~PAGE_MASK;
+		veclet->length = PAGE_SIZE - veclet->offset;
+		if (len < veclet->length)
+			veclet->length = len;
+		ptr &= PAGE_MASK;
+		len -= veclet->length;
+
+		if (!vma || ptr >= vma->vm_end) {
+			vma = find_vma(mm, ptr);
+			if (!vma) 
+				goto out_unlock;
+			if (vma->vm_start > ptr) {
+				if (!(vma->vm_flags & VM_GROWSDOWN))
+					goto out_unlock;
+				if (expand_stack(vma, ptr))
+					goto out_unlock;
+			}
+			if (((datain) && (!(vma->vm_flags & VM_WRITE))) ||
+					(!(vma->vm_flags & VM_READ))) {
+				err = -EFAULT;
+				goto out_unlock;
+			}
+		}
+		spin_lock(&mm->page_table_lock);
+		while (!(map = follow_page(mm, ptr, datain))) {
+			int ret;
+
+			spin_unlock(&mm->page_table_lock);
+			ret = handle_mm_fault(mm, vma, ptr, datain);
+			if (ret <= 0) {
+				if (!ret)
+					goto out_unlock;
+				else {
+					err = -ENOMEM;
+					goto out_unlock;
+				}
+			}
+			spin_lock(&mm->page_table_lock);
+		}			
+		map = get_page_map(map);
+		if (map) {
+			flush_dcache_page(map);
+			atomic_inc(&map->count);
+		} else
+			printk (KERN_INFO "Mapped page missing [%d]\n", i);
+		spin_unlock(&mm->page_table_lock);
+		veclet->page = map;
+		veclet++;
+
+		ptr += PAGE_SIZE;
+		vec->nr = ++i;
+	}
+
+	veclet->page = NULL;	/* dummy for the prefetch in unmap_kvec */
+	veclet->length = 0;	/* bug checking ;-) */
+
+	up_read(&mm->mmap_sem);
+	dprintk ("map_user_kiobuf: end OK\n");
+	return vec;
+
+ out_unlock:
+	up_read(&mm->mmap_sem);
+	unmap_kvec(vec, 0);
+	kfree(vec);
+	dprintk("map_user_kvec: err(%d) rw=%d\n", err, rw);
+	return ERR_PTR(err);
+
+ out_adjust:
+	atomic_sub(nr_pages, &user_pinned_pages);
+	dprintk("map_user_kvec: err(%d) rw=%d\n", err, rw);
+	return ERR_PTR(err);
+}
+
+/*
+ * Unmap all of the pages referenced by a kvec.  We release the pages,
+ * and mark them dirty if the memory was written to. 
+ */
+
+void unmap_kvec (struct kvec *vec, int dirtied)
+{
+	struct kveclet *veclet = vec->veclet;
+	struct kveclet *end = vec->veclet + vec->nr;
+	struct page *map = veclet->page;
+
+	prefetchw(map);
+	for (; veclet<end; map = (++veclet)->page) {
+		prefetchw(veclet[1].page);
+		if (likely(map != NULL) && !PageReserved(map)) {
+			if (dirtied) {
+				SetPageDirty(map);
+				flush_dcache_page(map);	/* FIXME */
+			}
+			__free_page(map);
+		}
+	}
+
+	atomic_sub(vec->max_nr, &user_pinned_pages);
+	vec->nr = 0;
+}
+
+void free_kvec(struct kvec *vec)
+{
+	if (unlikely(vec->nr))
+		BUG();
+	kfree(vec);
+}
+
+/* kvec memory copy helper: appends len bytes in from to dst.
+ */
+void memcpy_to_kvec_dst(struct kvec_dst *dst, const char *from, long len)
+{
+	if (unlikely(len < 0))
+		BUG();
+	do {
+		int cnt = len;
+		if (dst->space < cnt)
+			cnt = dst->space;
+
+		memcpy(dst->dst, from, cnt);
+		from += cnt;
+		dst->space -= cnt;
+		dst->dst += cnt;
+		len -= cnt;
+		if (!dst->space && len) {
+			kvec_dst_unmap(dst);
+			dst->let++;
+			dst->offset = 0;
+			kvec_dst_map(dst);
+			if (unlikely(!dst->space))
+				BUG();
+		}
+	} while (len);
+}
+
+/* kvec memory copy helper: copies and consumes len bytes in from to dst.
+ */
+void memcpy_from_kvec_dst(char *to, struct kvec_dst *from, long len)
+{
+	if (unlikely(len < 0))
+		BUG();
+	do {
+		int cnt = len;
+		if (from->space < cnt)
+			cnt = from->space;
+
+		memcpy(to, from->dst, cnt);
+		to += cnt;
+		from->space -= cnt;
+		from->dst += cnt;
+		len -= cnt;
+		if (unlikely(!from->space && len)) {
+			kvec_dst_unmap(from);
+			from->let++;
+			from->offset = 0;
+			kvec_dst_map(from);
+			if (unlikely(!from->space))
+				BUG();
+		}
+	} while (len);
+}
+
diff -urN v2.4.19-pre5/mm/mmap.c linux.diff/mm/mmap.c
--- v2.4.19-pre5/mm/mmap.c	Wed Apr  3 21:04:41 2002
+++ linux.diff/mm/mmap.c	Mon May 13 16:20:31 2002
@@ -14,6 +14,7 @@
 #include <linux/file.h>
 #include <linux/fs.h>
 #include <linux/personality.h>
+#include <linux/compiler.h>
 
 #include <asm/uaccess.h>
 #include <asm/pgalloc.h>
@@ -548,7 +549,17 @@
 	 * Answer: Yes, several device drivers can do it in their
 	 *         f_op->mmap method. -DaveM
 	 */
-	addr = vma->vm_start;
+	if (addr != vma->vm_start) {
+		struct vm_area_struct *stale_vma;
+		/* Since addr changed, we rely on the mmap op to prevent 
+		 * collisions with existing vmas and just use find_vma_prepare 
+		 * to update the tree pointers.
+		 */
+		addr = vma->vm_start;
+		stale_vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
+		if (unlikely(stale_vma && stale_vma->vm_start < vma->vm_end))
+			BUG();
+	}
 
 	vma_link(mm, vma, prev, rb_link, rb_parent);
 	if (correct_wcount)
diff -urN v2.4.19-pre5/mm/slab.c linux.diff/mm/slab.c
--- v2.4.19-pre5/mm/slab.c	Wed Apr  3 21:04:41 2002
+++ linux.diff/mm/slab.c	Thu Apr 18 18:37:10 2002
@@ -447,7 +447,7 @@
 		 * eliminates "false sharing".
 		 * Note for systems short on memory removing the alignment will
 		 * allow tighter packing of the smaller caches. */
-		sprintf(name,"size-%Zd",sizes->cs_size);
+		snprintf(name, sizeof(name), "size-%Zd",sizes->cs_size);
 		if (!(sizes->cs_cachep =
 			kmem_cache_create(name, sizes->cs_size,
 					0, SLAB_HWCACHE_ALIGN, NULL, NULL))) {
@@ -459,7 +459,7 @@
 			offslab_limit = sizes->cs_size-sizeof(slab_t);
 			offslab_limit /= 2;
 		}
-		sprintf(name, "size-%Zd(DMA)",sizes->cs_size);
+		snprintf(name, sizeof(name), "size-%Zd(DMA)",sizes->cs_size);
 		sizes->cs_dmacachep = kmem_cache_create(name, sizes->cs_size, 0,
 			      SLAB_CACHE_DMA|SLAB_HWCACHE_ALIGN, NULL, NULL);
 		if (!sizes->cs_dmacachep)
@@ -1841,7 +1841,7 @@
 			off -= len;		\
 			len = 0;		\
 		} else {			\
-			if (len-off > count)	\
+			if (len-off >= count)	\
 				goto t;		\
 		}				\
 	} while (0)
@@ -1854,7 +1854,7 @@
 	/* Output format version, so at least we can change it without _too_
 	 * many complaints.
 	 */
-	len += sprintf(page+len, "slabinfo - version: 1.1"
+	len += snprintf(page+len, PAGE_SIZE-len, "slabinfo - version: 1.1"
 #if STATS
 				" (statistics)"
 #endif
@@ -1902,7 +1902,8 @@
 		num_slabs+=active_slabs;
 		num_objs = num_slabs*cachep->num;
 
-		len += sprintf(page+len, "%-17s %6lu %6lu %6u %4lu %4lu %4u",
+		len += snprintf(page+len, PAGE_SIZE-len,
+			"%-17s %6lu %6lu %6u %4lu %4lu %4u",
 			cachep->name, active_objs, num_objs, cachep->objsize,
 			active_slabs, num_slabs, (1<<cachep->gfporder));
 
@@ -1914,7 +1915,8 @@
 			unsigned long reaped = cachep->reaped;
 			unsigned long allocs = cachep->num_allocations;
 
-			len += sprintf(page+len, " : %6lu %7lu %5lu %4lu %4lu",
+			len += snprintf(page+len, PAGE_SIZE-len,
+					" : %6lu %7lu %5lu %4lu %4lu",
 					high, allocs, grown, reaped, errors);
 		}
 #endif
@@ -1928,7 +1930,7 @@
 				limit = cc->limit;
 			else
 				limit = 0;
-			len += sprintf(page+len, " : %4u %4u",
+			len += snprintf(page+len, PAGE_SIZE-len, " : %4u %4u",
 					limit, batchcount);
 		}
 #endif
@@ -1938,21 +1940,27 @@
 			unsigned long allocmiss = atomic_read(&cachep->allocmiss);
 			unsigned long freehit = atomic_read(&cachep->freehit);
 			unsigned long freemiss = atomic_read(&cachep->freemiss);
-			len += sprintf(page+len, " : %6lu %6lu %6lu %6lu",
+			len += snprintf(page+len, PAGE_SIZE-len,
+					" : %6lu %6lu %6lu %6lu",
 					allochit, allocmiss, freehit, freemiss);
 		}
 #endif
-		len += sprintf(page+len,"\n");
+		len += snprintf(page+len, PAGE_SIZE-len, "\n");
 		spin_unlock_irq(&cachep->spinlock);
 		FIXUP(got_data_up);
 		p = cachep->next.next;
+		if (len > PAGE_SIZE - 512)
+			break;
 	} while (p != &cache_cache.next);
 got_data_up:
 	up(&cache_chain_sem);
 
 got_data:
-	*start = page+off;
-	return len;
+	if (off < len) {
+		*start = page+off;
+		return len;
+	}
+	return 0;
 }
 
 /**
diff -urN v2.4.19-pre5/mm/wtd.c linux.diff/mm/wtd.c
--- v2.4.19-pre5/mm/wtd.c	Wed Dec 31 19:00:00 1969
+++ linux.diff/mm/wtd.c	Thu Apr 11 16:17:49 2002
@@ -0,0 +1,73 @@
+#include <linux/worktodo.h>
+#include <linux/fs.h>
+#include <linux/mm.h>
+#include <linux/pagemap.h>
+
+static void __wtd_lock_page_waiter(wait_queue_t *wait)
+{
+	struct worktodo *wtd = (struct worktodo *)wait;
+	struct page *page = (struct page *)wtd->data;
+
+	if (!TryLockPage(page)) {
+		__remove_wait_queue(page_waitqueue(page), &wtd->wait);
+		wtd_queue(wtd);
+	} else
+		schedule_task(&run_disk_tq);
+}
+
+int wtd_lock_page(struct worktodo *wtd, struct page *page)
+{
+	if (TryLockPage(page)) {
+		wtd->data = page;
+		init_waitqueue_func_entry(&wtd->wait, __wtd_lock_page_waiter);
+
+		/* Wakeups may race with TryLockPage, so try again within the wait 
+		 * queue spinlock.
+		 */
+		if (!add_wait_queue_cond(page_waitqueue(page), &wtd->wait,
+					TryLockPage(page))) {
+			/* Page is still locked.  Kick the disk queue... */
+			run_task_queue(&tq_disk);
+			return 0;
+		}
+	}
+
+	return 1;
+}
+
+static void __wtd_bh_waiter(wait_queue_t *wait)
+{
+	struct worktodo *wtd = (struct worktodo *)wait;
+	struct buffer_head *bh = (struct buffer_head *)wtd->data;
+
+	if (!buffer_locked(bh)) {
+		__remove_wait_queue(&bh->b_wait, &wtd->wait);
+		wtd_queue(wtd);
+	} else {
+		schedule_task(&run_disk_tq);
+	}
+}
+
+int wtd_wait_on_buffer(struct worktodo *wtd, struct buffer_head *bh)
+{
+	if (!buffer_locked(bh)) {
+		return 1;
+	}
+	wtd->data = bh;
+	init_waitqueue_func_entry(&wtd->wait, __wtd_bh_waiter);
+	if (add_wait_queue_cond(&bh->b_wait, &wtd->wait, buffer_locked(bh)))
+		return 1;
+	run_task_queue(&tq_disk);
+	return 0;
+}
+
+void do_run_tq_disk(void *data)
+{
+	run_task_queue(&tq_disk);
+}
+
+struct tq_struct run_disk_tq = {
+	routine: do_run_tq_disk,
+	data: NULL
+};
+
diff -urN v2.4.19-pre5/net/core/datagram.c linux.diff/net/core/datagram.c
--- v2.4.19-pre5/net/core/datagram.c	Tue Jan  1 14:09:35 2002
+++ linux.diff/net/core/datagram.c	Mon Apr  8 13:06:39 2002
@@ -8,6 +8,8 @@
  *
  *	Authors:	Alan Cox <alan@redhat.com>. (datagram_poll() from old udp.c code)
  *
+ *	Portions Copyright 2001 Red Hat, Inc.
+ *
  *	Fixes:
  *		Alan Cox	:	NULL return from skb_peek_copy() understood
  *		Alan Cox	:	Rewrote skb_read_datagram to avoid the skb_peek_copy stuff.
@@ -21,6 +23,7 @@
  *		Darryl Miles	:	Fixed non-blocking SOCK_STREAM.
  *		Alan Cox	:	POSIXisms
  *		Pete Wyckoff    :       Unconnected accept() fix.
+ *		Benjamin LaHaise:	added kvec operations
  *
  */
 
@@ -37,6 +40,7 @@
 #include <linux/rtnetlink.h>
 #include <linux/poll.h>
 #include <linux/highmem.h>
+#include <linux/worktodo.h>
 
 #include <net/protocol.h>
 #include <linux/skbuff.h>
@@ -446,3 +450,321 @@
 
 	return mask;
 }
+
+/*
+ */
+static inline void skb_copy_datagram_kvec_dst(const struct sk_buff *skb,
+		int offset, struct kvec_dst *dst, int len)
+{
+	int i, copy;
+	int start = skb->len - skb->data_len;
+
+	/* Copy header. */
+	if ((copy = start-offset) > 0) {
+		if (copy > len)
+			copy = len;
+		memcpy_to_kvec_dst(dst, skb->data + offset, copy);
+		if ((len -= copy) == 0)
+			return;
+		offset += copy;
+	}
+
+	/* Copy paged appendix. Hmm... why does this look so complicated? */
+	for (i=0; i<skb_shinfo(skb)->nr_frags; i++) {
+		int end;
+
+		BUG_TRAP(start <= offset+len);
+
+		end = start + skb_shinfo(skb)->frags[i].size;
+		if ((copy = end-offset) > 0) {
+			u8  *vaddr;
+			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
+			struct page *page = frag->page;
+
+			if (copy > len)
+				copy = len;
+			vaddr = kmap_atomic(page, KM_USER1);
+			memcpy_to_kvec_dst(dst, vaddr + frag->page_offset +
+					     offset-start, copy);
+			kunmap_atomic(vaddr, KM_USER1);
+			if (!(len -= copy))
+				return;
+			offset += copy;
+		}
+		start = end;
+	}
+
+	if (skb_shinfo(skb)->frag_list) {
+		struct sk_buff *list;
+
+		for (list = skb_shinfo(skb)->frag_list; list; list=list->next) {
+			int end;
+
+			BUG_TRAP(start <= offset+len);
+
+			end = start + list->len;
+			if ((copy = end-offset) > 0) {
+				if (copy > len)
+					copy = len;
+				skb_copy_datagram_kvec_dst(list, offset-start, dst, copy);
+				if ((len -= copy) == 0)
+					return;
+				offset += copy;
+			}
+			start = end;
+		}
+	}
+}
+
+void skb_copy_datagram_kvec(const struct sk_buff *skb, int offset,
+			   struct kvec *vec, int len)
+{
+	struct kvec_dst dst;
+	kvec_dst_init(&dst, KM_USER0);
+	kvec_dst_set(&dst, vec->veclet);
+	kvec_dst_map(&dst);
+	skb_copy_datagram_kvec_dst(skb, offset, &dst, len);
+	kvec_dst_unmap(&dst);
+}
+
+/* C++ would be better for this.  Please don't torture me with this code 
+ * ever again.
+ */
+static inline unsigned int csum_and_copy_to_dst(struct kvec_dst *dst,
+				 const char *from, int len, unsigned int csum)
+{
+	do {
+		int cnt = len;
+		if (dst->space < cnt)
+			cnt = dst->space;
+
+		memcpy(dst->dst, from, cnt);
+		csum = csum_partial_copy_nocheck(from, dst->dst, cnt, csum);
+		from += cnt;
+		dst->space -= cnt;
+		dst->dst += cnt;
+		len -= cnt;
+		if (!dst->space && len) {
+			kvec_dst_unmap(dst);
+			dst->let++;
+			dst->offset = 0;
+			kvec_dst_map(dst);
+			if (!dst->space)
+				BUG();
+		}
+	} while (len);
+	return csum;
+}
+
+static inline void skb_copy_and_csum_datagram_kvec_dst(const struct sk_buff *skb, int offset, struct kvec_dst *dst, int len, unsigned int *csump)
+{
+	int i, copy;
+	int start = skb->len - skb->data_len;
+	int pos = 0;
+
+	/* Copy header. */
+	if ((copy = start-offset) > 0) {
+		if (copy > len)
+			copy = len;
+		*csump = csum_and_copy_to_dst(dst, skb->data+offset, copy, *csump);
+		if ((len -= copy) == 0)
+			return;
+		offset += copy;
+		pos = copy;
+	}
+
+	for (i=0; i<skb_shinfo(skb)->nr_frags; i++) {
+		int end;
+
+		BUG_TRAP(start <= offset+len);
+
+		end = start + skb_shinfo(skb)->frags[i].size;
+		if ((copy = end-offset) > 0) {
+			unsigned int csum2;
+			u8  *vaddr;
+			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
+			struct page *page = frag->page;
+
+			if (copy > len)
+				copy = len;
+			vaddr = kmap_atomic(page, KM_USER1);
+			csum2 = csum_and_copy_to_dst(dst,
+				vaddr + frag->page_offset + offset-start,
+				copy, 0);
+			kunmap_atomic(vaddr, KM_USER1);
+			*csump = csum_block_add(*csump, csum2, pos);
+			if (!(len -= copy))
+				return;
+			offset += copy;
+			pos += copy;
+		}
+		start = end;
+	}
+
+	if (skb_shinfo(skb)->frag_list) {
+		struct sk_buff *list;
+
+		for (list = skb_shinfo(skb)->frag_list; list; list=list->next) {
+			int end;
+
+			BUG_TRAP(start <= offset+len);
+
+			end = start + list->len;
+			if ((copy = end-offset) > 0) {
+				unsigned int csum2 = 0;
+				if (copy > len)
+					copy = len;
+				skb_copy_and_csum_datagram_kvec_dst(list, offset-start, dst, copy, &csum2);
+				*csump = csum_block_add(*csump, csum2, pos);
+				if ((len -= copy) == 0)
+					return;
+				offset += copy;
+				pos += copy;
+			}
+			start = end;
+		}
+	}
+}
+
+int skb_copy_and_csum_datagram_kvec(const struct sk_buff *skb, int offset,
+			   struct kvec *vec, int len)
+{
+	unsigned int csum;
+	struct kvec_dst dst;
+
+	csum = csum_partial(skb->data, offset, skb->csum);
+
+	kvec_dst_init(&dst, KM_USER0);
+	kvec_dst_set(&dst, vec->veclet);
+	kvec_dst_map(&dst);
+	skb_copy_and_csum_datagram_kvec_dst(skb, offset, &dst, len, &csum);
+	kvec_dst_unmap(&dst);
+
+	if ((unsigned short)csum_fold(csum))
+		return -EINVAL;
+	return 0;
+}
+
+struct skb_async_info {
+	struct worktodo	wtd;
+	struct sock	*sk;
+	int		len;
+	void (*finish)(struct sock *sk, kvec_cb_t cb, int len, struct sk_buff *skb);
+	kvec_cb_t	cb;
+};
+static void skb_async_read_worker(void *_data);
+
+int skb_kvec_recv_datagram(struct sock * sk, kvec_cb_t cb, int len,
+	void (*finish)(struct sock *sk, kvec_cb_t cb, int len, struct sk_buff *skb))
+{
+	struct skb_async_info *info = kmalloc(sizeof(struct skb_async_info), GFP_KERNEL);
+	if (info) {
+		wtd_set_action(&info->wtd, skb_async_read_worker, info);
+		info->sk = sk;
+		info->len = len;
+		info->finish = finish;
+		info->cb = cb;
+		skb_async_read_worker(info);
+		return 0;
+	}
+	return -EAGAIN;
+}
+
+static void skb_async_read_waiter(wait_queue_t *wait)
+{
+	struct skb_async_info *info = (void *)wait;
+	__remove_wait_queue(info->sk->sleep, &info->wtd.wait);
+	wtd_queue(&info->wtd);
+}
+
+static void skb_async_read_worker(void *_data)
+{
+	struct skb_async_info	*info = _data;
+	struct sock *sk = info->sk;
+	struct sk_buff *skb;
+	int error;
+
+	/* Caller is allowed not to check sk->err before skb_recv_datagram() */
+	error = sock_error(sk);
+	if (error)
+		goto no_packet;
+
+
+	init_waitqueue_func_entry(&info->wtd.wait, skb_async_read_waiter);
+
+	/* Attempt to dequeue and process any skbs that already arrived.
+	 * Note that add_wait_queue_cond is used to check against a race
+	 * where an skb is added to the queue after we checked but before 
+	 * the callback is added to the wait queue.
+	 */
+	do {
+		skb = skb_dequeue(&sk->receive_queue);
+		if (skb) {
+			info->finish(sk, info->cb, info->len, skb);
+			kfree(info);
+			return;
+		}
+	} while ( add_wait_queue_cond( sk->sleep, &info->wtd.wait,
+					(!(error = sock_error(sk)) &&
+					skb_queue_empty(&sk->receive_queue)) )
+		  && !error);
+
+	if (!error)
+		return;
+
+no_packet:
+	info->cb.fn(info->cb.data, info->cb.vec, error);
+	kfree(info);
+	return;
+}
+
+#if 0
+static void skb_async_read_worker(void *_data)
+{
+	struct skb_async_info	*info = _data;
+	int error;
+
+	/* Socket errors? */
+	error = sock_error(sk);
+	if (error)
+		goto out_err;
+
+	if (!skb_queue_empty(&sk->receive_queue))
+		goto ready;
+
+	/* Socket shut down? */
+	if (sk->shutdown & RCV_SHUTDOWN)
+		goto out_noerr;
+
+	/* Sequenced packets can come disconnected. If so we report the problem */
+	error = -ENOTCONN;
+	if(connection_based(sk) && !(sk->state==TCP_ESTABLISHED || sk->state==TCP_LISTEN))
+		goto out_err;
+
+	/* handle signals */
+	if (signal_pending(current))
+		goto interrupted;
+
+	/* here: queue sleep */
+	*timeo_p = schedule_timeout(*timeo_p);
+	return;
+
+ready:
+	current->state = TASK_RUNNING;
+	remove_wait_queue(sk->sleep, &wait);
+	return 0;
+
+interrupted:
+	error = sock_intr_errno(*timeo_p);
+out_err:
+	*err = error;
+out:
+	current->state = TASK_RUNNING;
+	remove_wait_queue(sk->sleep, &wait);
+	return error;
+out_noerr:
+	*err = 0;
+	error = 1;
+	goto out;
+}
+#endif
diff -urN v2.4.19-pre5/net/core/sock.c linux.diff/net/core/sock.c
--- v2.4.19-pre5/net/core/sock.c	Tue Jan  1 14:09:35 2002
+++ linux.diff/net/core/sock.c	Mon Apr  8 00:37:02 2002
@@ -586,6 +586,8 @@
 	if(sk && zero_it) {
 		memset(sk, 0, sizeof(struct sock));
 		sk->family = family;
+		INIT_LIST_HEAD(&sk->kvec_read_list);
+		INIT_LIST_HEAD(&sk->kvec_write_list);
 		sock_lock_init(sk);
 	}
 
@@ -1116,7 +1118,7 @@
 void sock_def_wakeup(struct sock *sk)
 {
 	read_lock(&sk->callback_lock);
-	if (sk->sleep && waitqueue_active(sk->sleep))
+	if (sk->sleep)
 		wake_up_interruptible_all(sk->sleep);
 	read_unlock(&sk->callback_lock);
 }
@@ -1124,7 +1126,7 @@
 void sock_def_error_report(struct sock *sk)
 {
 	read_lock(&sk->callback_lock);
-	if (sk->sleep && waitqueue_active(sk->sleep))
+	if (sk->sleep)
 		wake_up_interruptible(sk->sleep);
 	sk_wake_async(sk,0,POLL_ERR); 
 	read_unlock(&sk->callback_lock);
@@ -1133,7 +1135,7 @@
 void sock_def_readable(struct sock *sk, int len)
 {
 	read_lock(&sk->callback_lock);
-	if (sk->sleep && waitqueue_active(sk->sleep))
+	if (sk->sleep)
 		wake_up_interruptible(sk->sleep);
 	sk_wake_async(sk,1,POLL_IN);
 	read_unlock(&sk->callback_lock);
@@ -1147,7 +1149,7 @@
 	 * progress.  --DaveM
 	 */
 	if((atomic_read(&sk->wmem_alloc) << 1) <= sk->sndbuf) {
-		if (sk->sleep && waitqueue_active(sk->sleep))
+		if (sk->sleep)
 			wake_up_interruptible(sk->sleep);
 
 		/* Should agree with poll, otherwise some programs break */
diff -urN v2.4.19-pre5/net/ipv4/af_inet.c linux.diff/net/ipv4/af_inet.c
--- v2.4.19-pre5/net/ipv4/af_inet.c	Wed Apr  3 21:04:41 2002
+++ linux.diff/net/ipv4/af_inet.c	Mon Apr  8 00:37:02 2002
@@ -729,6 +729,19 @@
 }
 
 
+int inet_kvec_read(struct socket *sock, kvec_cb_t cb, size_t len)
+{
+	struct sock *sk = sock->sk;
+
+	return sk->prot->kvec_read(sk, cb, len);
+}
+
+int inet_kvec_write(struct socket *sock, kvec_cb_t cb, size_t len)
+{
+	struct sock *sk = sock->sk;
+
+	return sk->prot->kvec_write(sk, cb, len);
+}
 
 int inet_recvmsg(struct socket *sock, struct msghdr *msg, int size,
 		 int flags, struct scm_cookie *scm)
@@ -960,7 +973,9 @@
 	sendmsg:	inet_sendmsg,
 	recvmsg:	inet_recvmsg,
 	mmap:		sock_no_mmap,
-	sendpage:	tcp_sendpage
+	sendpage:	tcp_sendpage,
+	kvec_read:	inet_kvec_read,
+	kvec_write:	inet_kvec_write,
 };
 
 struct proto_ops inet_dgram_ops = {
@@ -982,6 +997,8 @@
 	recvmsg:	inet_recvmsg,
 	mmap:		sock_no_mmap,
 	sendpage:	sock_no_sendpage,
+	kvec_read:	inet_kvec_read,
+	kvec_write:	inet_kvec_write,
 };
 
 struct net_proto_family inet_family_ops = {
diff -urN v2.4.19-pre5/net/ipv4/tcp.c linux.diff/net/ipv4/tcp.c
--- v2.4.19-pre5/net/ipv4/tcp.c	Wed Apr  3 21:04:41 2002
+++ linux.diff/net/ipv4/tcp.c	Tue Apr 30 17:29:31 2002
@@ -251,6 +251,7 @@
 #include <linux/poll.h>
 #include <linux/init.h>
 #include <linux/smp_lock.h>
+#include <linux/compiler.h>
 
 #include <net/icmp.h>
 #include <net/tcp.h>
@@ -470,8 +471,8 @@
 		if (sk->sleep && waitqueue_active(sk->sleep))
 			wake_up_interruptible(sk->sleep);
 
-		if (sock->fasync_list && !(sk->shutdown&SEND_SHUTDOWN))
-			sock_wake_async(sock, 2, POLL_OUT);
+		if (!(sk->shutdown&SEND_SHUTDOWN))
+			sk_wake_async(sk, 2, POLL_OUT);
 	}
 }
 
@@ -676,11 +677,266 @@
 	return 0;
 }
 
+struct tcp_write_async_info {
+	struct worktodo	wtd;
+	struct sock	*sk;
+	int		len;
+	int		done;
+	int		offset;
+	struct kveclet	*cur_let;
+	kvec_cb_t	cb;
+	spinlock_t	lock;
+};
+
+static void async_lock_sock_wait(wait_queue_t *wait)
+{
+	struct tcp_write_async_info *info = (void *)wait;
+	printk("async_lock_sock_wait(%p)\n", info);
+	if (!info->sk->lock.users) {
+		printk("async_lock_sock_wait: queuing\n");
+		__remove_wait_queue(info->sk->sleep, &info->wtd.wait);
+		wtd_queue(&info->wtd);
+	}
+}
+
+static void async_lock_sock(void *data)
+{
+	struct tcp_write_async_info *info = data;
+	struct sock *sk;
+	printk(KERN_DEBUG "async_lock_sock(%p)\n", info);
+	sk = info->sk;
+	spin_lock_bh(&sk->lock.slock);
+	if (sk->lock.users) {
+		printk(KERN_DEBUG "async_lock_sock: waiting\n");
+		wtd_push(&info->wtd, async_lock_sock, info);
+		init_waitqueue_func_entry(&info->wtd.wait, async_lock_sock_wait);
+		if (!add_wait_queue_cond(sk->sleep, &info->wtd.wait, !sk->lock.users)) {
+			spin_unlock_bh(&sk->lock.slock);
+			return;
+		}
+		wtd_pop(&info->wtd);
+	}
+	printk(KERN_DEBUG "async_lock_sock: locking\n");
+	sk->lock.users = 1;
+	spin_unlock_bh(&sk->lock.slock);
+	wtd_queue(&info->wtd);
+}
+
+static void async_wait_for_tcp_connect(void *data);
+int tcp_kvec_write(struct sock *sk, kvec_cb_t cb, int len)
+{
+	struct tcp_write_async_info *info;
+	info = kmalloc(sizeof(*info), GFP_KERNEL);
+	printk(KERN_DEBUG "tcp_kvec_write: %p\n", info);
+	if (!info)
+		return -ENOMEM;
+	wtd_init(&info->wtd, async_wait_for_tcp_connect);
+	info->sk = sk;
+	info->len = len;
+	info->done = 0;
+	info->offset = 0;
+	info->cur_let = cb.vec->veclet;
+	info->cb = cb;
+	spin_lock_init(&info->lock);
+	async_lock_sock(info);
+	return 0;
+}
+
+static void async_cn_wait_task(void *data)
+{
+	struct tcp_write_async_info *info = (void *)data;
+	async_lock_sock(info);
+}
+
+static void async_cn_wait(wait_queue_t *wait)
+{
+	struct tcp_write_async_info *info = (void *)wait;
+	__remove_wait_queue(info->sk->sleep, &info->wtd.wait);
+	wtd_set_action(&info->wtd, async_cn_wait_task, info);
+	wtd_queue(&info->wtd);
+}
+
+/* sock_get_iocb
+ *	Attempts to allocate a local socket iocb, which allows high
+ *	performance for the common cases of a small number of ios
+ *	outstanding per socket.
+ */
+struct sock_iocb *sock_get_iocb(struct sock *sk)
+{
+	struct sock_iocb *iocb;
+
+	iocb = kmalloc(sizeof(*iocb), GFP_KERNEL);
+	return iocb;
+}
+
+void sock_put_iocb(struct sock_iocb *iocb)
+{
+	kfree(iocb);
+}
+
+/* tcp_kvec_read_kick
+ *	Attempts to process an async read request.  Must be called with 
+ *	the socket lock held.
+ */
+void tcp_kvec_read_kick(struct sock *sk, struct sock_iocb *iocb)
+{
+	TCP_CHECK_TIMER(sk);
+#if 0
+	if (unlikely(TCP_LISTEN == sk->state))
+		goto out;
+#endif
+	return;
+}
+
+/* tcp_kvec_read
+ *	Queues an async read request on a socket.  If there were 
+ *	no outstanding read requests, kicks the backlog processing.
+ */
+int tcp_kvec_read(struct sock *sk, kvec_cb_t cb, int size)
+{
+	struct sock_iocb *iocb;
+	printk("tcp_kvec_read(%p, %d): blah", sk, size);
+
+	iocb = sock_get_iocb(sk);
+	if (unlikely(NULL == iocb))
+		return -ENOMEM;
+
+	iocb->cb = cb;
+	kvec_dst_init(&iocb->dst, KM_USER0);
+
+	spin_lock_bh(&sk->lock.slock);
+	if (sk->lock.users != 0 || !list_empty(&sk->kvec_read_list)) {
+		list_add_tail(&iocb->list, &sk->kvec_read_list);
+		spin_unlock_bh(&sk->lock.slock);
+		return 0;
+	}
+	spin_unlock_bh(&sk->lock.slock);
+
+	/* We're the head read request and now own the socket lock;
+	 * attempt to kick off processing.
+	 */
+	tcp_kvec_read_kick(sk, iocb);
+	release_sock(sk);
+	return 0;
+}
+
+static void tcp_kvec_write_worker(struct tcp_write_async_info *info);
+static void async_wait_for_tcp_connect(void *data)
+{
+	struct tcp_write_async_info *info = data;
+	struct sock *sk = info->sk;
+	int err;
+	/* At this point the socket is locked for us. */
+	while((1 << sk->state) & ~(TCPF_ESTABLISHED | TCPF_CLOSE_WAIT)) {
+		if (sk->err) {
+			err = sock_error(sk);
+			goto error;
+		}
+		if ((1 << sk->state) &
+		   ~(TCPF_SYN_SENT | TCPF_SYN_RECV)) {
+			err = -EPIPE;
+			goto error;
+		}
+
+		sk->tp_pinfo.af_tcp.write_pending++;
+		init_waitqueue_func_entry(&info->wtd.wait, async_cn_wait);
+
+		/* Add our worker to the socket queue, but make sure the socket 
+		 * state isn't changed from when we checked while we do so.
+		 */
+		if (!add_wait_queue_cond(sk->sleep, &info->wtd.wait,
+			((1 << sk->state) & ~(TCPF_ESTABLISHED | TCPF_CLOSE_WAIT))
+			)) {
+			release_sock(sk);
+			return;
+		}
+	}
+	/* sk is now locked *and* the connection is established, let's 
+	 * proceed to the data transfer stage.
+	 */
+	tcp_kvec_write_worker(info);
+	return;
+
+error:
+	release_sock(sk);
+	info->cb.fn(info->cb.data, info->cb.vec, err);
+	kfree(info);
+}
+
 static inline int tcp_memory_free(struct sock *sk)
 {
 	return sk->wmem_queued < sk->sndbuf;
 }
 
+static void async_wait_for_tcp_memory(struct tcp_write_async_info *info);
+static void async_wait_for_tcp_memory_done(void *data)
+{
+	struct tcp_write_async_info *info = data;
+	info->sk->tp_pinfo.af_tcp.write_pending--;
+	if (tcp_memory_free(info->sk))
+		tcp_kvec_write_worker(info);
+	else
+		async_wait_for_tcp_memory(info);
+}
+
+static void async_wait_for_tcp_memory_waiting(void *data)
+{
+	struct tcp_write_async_info *info = data;
+	wtd_set_action(&info->wtd, async_wait_for_tcp_memory_done, info);
+	async_lock_sock(info);
+}
+
+static void async_wait_for_tcp_memory_wake(wait_queue_t *wait)
+{
+	struct tcp_write_async_info *info = (void *)wait;
+	__remove_wait_queue(info->sk->sleep, &info->wtd.wait);
+	wtd_set_action(&info->wtd, async_wait_for_tcp_memory_waiting, info);
+	wtd_queue(&info->wtd);
+}
+
+static void async_wait_for_tcp_memory(struct tcp_write_async_info *info)
+{
+	struct sock *sk = info->sk;
+	ssize_t res;
+	kvec_cb_t cb;
+	int raced = 0;
+
+	printk("async_wait_for_tcp_memory(%p)\n", info);
+	res = -EPIPE;
+	if (sk->err || (sk->shutdown & SEND_SHUTDOWN))
+		goto err;
+
+	if (tcp_memory_free(sk))
+		printk("async_wait_for_tcp_memory: spinning?\n");
+
+	init_waitqueue_func_entry(&info->wtd.wait, async_wait_for_tcp_memory_wake);
+	clear_bit(SOCK_ASYNC_NOSPACE, &sk->socket->flags);
+	set_bit(SOCK_NOSPACE, &sk->socket->flags);
+	raced = add_wait_queue_cond( sk->sleep, &info->wtd.wait,
+		!(sk->err || (sk->shutdown & SEND_SHUTDOWN) || tcp_memory_free(sk)) );
+
+	sk->tp_pinfo.af_tcp.write_pending++;
+	if (raced) {
+		/* Requeue to be run here: this allows other tasks to 
+		 * get rescheduled in case of bugs
+		 */
+		wtd_set_action(&info->wtd, async_wait_for_tcp_memory_done, info);
+		wtd_queue(&info->wtd);
+		return;
+	}
+
+	release_sock(sk);
+	return;
+
+err:
+	printk("async_wait_for_tcp_memory: err %ld\n", (long)res);
+	if (info->done)
+		res = info->done;
+	cb = info->cb;
+	kfree(info);
+	cb.fn(cb.data, cb.vec, res);
+}
+
 /*
  *	Wait for more memory for a socket
  */
@@ -691,9 +947,17 @@
 	long current_timeo = *timeo;
 	DECLARE_WAITQUEUE(wait, current);
 
+	if (sk->err || (sk->shutdown & SEND_SHUTDOWN))
+		return -EPIPE;
+
 	if (tcp_memory_free(sk))
 		current_timeo = vm_wait = (net_random()%(HZ/5))+2;
 
+	if (!*timeo) {
+		set_bit(SOCK_ASYNC_NOSPACE, &sk->socket->flags);
+		return -EAGAIN;
+	}
+
 	add_wait_queue(sk->sleep, &wait);
 	for (;;) {
 		set_bit(SOCK_ASYNC_NOSPACE, &sk->socket->flags);
@@ -744,7 +1008,7 @@
 	goto out;
 }
 
-ssize_t do_tcp_sendpages(struct sock *sk, struct page **pages, int poffset, size_t psize, int flags);
+ssize_t do_tcp_sendpages(struct sock *sk, struct kveclet *let, int poffset, size_t psize, int flags);
 
 static inline int
 can_coalesce(struct sk_buff *skb, int i, struct page *page, int off)
@@ -823,7 +1087,7 @@
 	return err;
 }
 
-ssize_t do_tcp_sendpages(struct sock *sk, struct page **pages, int poffset, size_t psize, int flags)
+ssize_t do_tcp_sendpages(struct sock *sk, struct kveclet *let, int poffset, size_t psize, int flags)
 {
 	struct tcp_opt *tp = &(sk->tp_pinfo.af_tcp);
 	int mss_now;
@@ -845,14 +1109,19 @@
 	if (sk->err || (sk->shutdown & SEND_SHUTDOWN))
 		goto do_error;
 
+	while (poffset > let->length) {
+		poffset -= let->length;
+		let++;
+	}
+
 	while (psize > 0) {
 		struct sk_buff *skb = sk->write_queue.prev;
 		int offset, size, copy, i;
 		struct page *page;
 
-		page = pages[poffset/PAGE_SIZE];
-		offset = poffset % PAGE_SIZE;
-		size = min_t(size_t, psize, PAGE_SIZE-offset);
+		page = let->page;
+		offset = let->offset;
+		size = min_t(unsigned int, psize, let->length);
 
 		if (tp->send_head==NULL || (copy = mss_now - skb->len) <= 0) {
 new_segment:
@@ -892,6 +1161,10 @@
 
 		copied += copy;
 		poffset += copy;
+		if (poffset >= let->length) {
+			poffset = 0;
+			let++;
+		}
 		if (!(psize -= copy))
 			goto out;
 
@@ -931,6 +1204,7 @@
 
 ssize_t tcp_sendpage(struct socket *sock, struct page *page, int offset, size_t size, int flags)
 {
+	struct kveclet let = { page, 0, PAGE_SIZE };
 	ssize_t res;
 	struct sock *sk = sock->sk;
 
@@ -940,16 +1214,54 @@
 	    !(sk->route_caps & TCP_ZC_CSUM_FLAGS))
 		return sock_no_sendpage(sock, page, offset, size, flags);
 
-#undef TCP_ZC_CSUM_FLAGS
 
 	lock_sock(sk);
 	TCP_CHECK_TIMER(sk);
-	res = do_tcp_sendpages(sk, &page, offset, size, flags);
+	res = do_tcp_sendpages(sk, &let, offset, size, flags);
 	TCP_CHECK_TIMER(sk);
 	release_sock(sk);
 	return res;
 }
 
+static void tcp_kvec_write_worker(struct tcp_write_async_info *info)
+{
+	struct sock *sk = info->sk;
+	int res;
+	if (!(sk->route_caps & NETIF_F_SG) || 
+	    !(sk->route_caps & TCP_ZC_CSUM_FLAGS))
+		BUG();
+
+	res = do_tcp_sendpages(sk, info->cur_let, info->offset, info->len - info->done, MSG_DONTWAIT);
+	if (res > 0)
+		info->done += res;
+
+	if (res == -EAGAIN) {
+		printk("tcp_kvec_write_worker: -EAGAIN: queuing\n");
+		goto requeue;
+	}
+
+	while (res > info->cur_let->length) {
+		res -= info->cur_let->length;
+		info->cur_let++;
+	}
+
+	if (res <= 0 || (info->done >= info->len)) {
+		kvec_cb_t cb = info->cb;
+		printk("tcp_kvec_write_worker: error(%d)\n", res);
+		if (info->done)
+			res = info->done;
+		release_sock(sk);
+		kfree(info);
+		cb.fn(cb.data, cb.vec, res);
+		return;
+	}
+
+requeue:
+	async_wait_for_tcp_memory(info);
+}
+
+#undef TCP_ZC_CSUM_FLAGS
+
 #define TCP_PAGE(sk)	(sk->tp_pinfo.af_tcp.sndmsg_page)
 #define TCP_OFF(sk)	(sk->tp_pinfo.af_tcp.sndmsg_off)
 
diff -urN v2.4.19-pre5/net/ipv4/tcp_ipv4.c linux.diff/net/ipv4/tcp_ipv4.c
--- v2.4.19-pre5/net/ipv4/tcp_ipv4.c	Wed Apr  3 21:04:41 2002
+++ linux.diff/net/ipv4/tcp_ipv4.c	Mon Apr  8 00:37:02 2002
@@ -2299,6 +2299,8 @@
 	hash:		tcp_v4_hash,
 	unhash:		tcp_unhash,
 	get_port:	tcp_v4_get_port,
+	kvec_read:	tcp_kvec_read,
+	kvec_write:	tcp_kvec_write,
 };
 
 
diff -urN v2.4.19-pre5/net/ipv4/udp.c linux.diff/net/ipv4/udp.c
--- v2.4.19-pre5/net/ipv4/udp.c	Wed Apr  3 21:04:42 2002
+++ linux.diff/net/ipv4/udp.c	Mon Apr  8 00:37:02 2002
@@ -93,6 +93,7 @@
 #include <net/route.h>
 #include <net/inet_common.h>
 #include <net/checksum.h>
+#include <linux/compiler.h>
 
 /*
  *	Snmp MIB for the UDP layer
@@ -619,6 +620,74 @@
 		__udp_checksum_complete(skb);
 }
 
+void udp_kvec_read_finish(struct sock *sk, kvec_cb_t cb, int len, struct sk_buff *skb)
+{
+  	struct sockaddr_in *sin = NULL;
+	int msg_flags = 0;
+  	int copied, err;
+
+	if (!skb)
+		BUG();
+
+  	copied = skb->len - sizeof(struct udphdr);
+	if (copied > len) {
+		copied = len;
+		msg_flags |= MSG_TRUNC;
+	}
+
+	err = 0;
+
+	if (skb->ip_summed==CHECKSUM_UNNECESSARY) {
+		skb_copy_datagram_kvec(skb, sizeof(struct udphdr),
+					      cb.vec, copied);
+	} else if (msg_flags&MSG_TRUNC) {
+		err = -EAGAIN;
+		if (unlikely(__udp_checksum_complete(skb))) {
+			UDP_INC_STATS_BH(UdpInErrors);
+			goto out_free;
+		}
+		err = 0;
+		skb_copy_datagram_kvec(skb, sizeof(struct udphdr),
+					      cb.vec, copied);
+	} else {
+		err = skb_copy_and_csum_datagram_kvec(skb,
+					sizeof(struct udphdr), cb.vec, copied);
+	}
+
+	if (err)
+		goto out_free;
+
+	//sock_recv_timestamp(msg, sk, skb);
+
+	/* Copy the address. */
+	if (sin)
+	{
+		sin->sin_family = AF_INET;
+		sin->sin_port = skb->h.uh->source;
+		sin->sin_addr.s_addr = skb->nh.iph->saddr;
+		memset(sin->sin_zero, 0, sizeof(sin->sin_zero));
+  	}
+	//if (sk->protinfo.af_inet.cmsg_flags)
+	//	ip_cmsg_recv(msg, skb);
+	err = copied;
+  
+out_free:
+  	skb_free_datagram(sk, skb);
+  	cb.fn(cb.data, cb.vec, err);
+	return;
+}
+
+static int udp_kvec_read(struct sock *sk, kvec_cb_t cb, int len)
+{
+	return skb_kvec_recv_datagram(sk, cb, len, udp_kvec_read_finish);
+}
+
+static int udp_kvec_write(struct sock *sk, kvec_cb_t cb, int len)
+{
+	return -EINVAL;		/* TODO: someone please write ;-) */
+}
+
+
 /*
  * 	This should be easy, if there is something there we
  * 	return it, otherwise we block.
@@ -1037,6 +1106,8 @@
 	getsockopt:	ip_getsockopt,
 	sendmsg:	udp_sendmsg,
 	recvmsg:	udp_recvmsg,
+	kvec_read:	udp_kvec_read,
+	kvec_write:	udp_kvec_write,
 	backlog_rcv:	udp_queue_rcv_skb,
 	hash:		udp_v4_hash,
 	unhash:		udp_v4_unhash,
diff -urN v2.4.19-pre5/net/khttpd/datasending.c linux.diff/net/khttpd/datasending.c
--- v2.4.19-pre5/net/khttpd/datasending.c	Mon Sep 24 02:16:05 2001
+++ linux.diff/net/khttpd/datasending.c	Tue Apr  2 18:56:57 2002
@@ -127,7 +127,7 @@
 				desc.count = ReadSize;
 				desc.buf = (char *) CurrentRequest->sock;
 				desc.error = 0;
-				do_generic_file_read(CurrentRequest->filp, ppos, &desc, sock_send_actor);
+				do_generic_file_read(CurrentRequest->filp, ppos, &desc, sock_send_actor, 0);
 				if (desc.written>0)
 				{	
 					CurrentRequest->BytesSent += desc.written;
diff -urN v2.4.19-pre5/net/socket.c linux.diff/net/socket.c
--- v2.4.19-pre5/net/socket.c	Wed Apr  3 21:04:42 2002
+++ linux.diff/net/socket.c	Mon Apr  8 13:16:30 2002
@@ -44,6 +44,7 @@
  *		Tigran Aivazian	:	sys_send(args) calls sys_sendto(args, NULL, 0)
  *		Tigran Aivazian	:	Made listen(2) backlog sanity checks 
  *					protocol-independent
+ *		Benjamin LaHaise:	real aio support.
  *
  *
  *		This program is free software; you can redistribute it and/or
@@ -104,6 +105,8 @@
 			  unsigned long count, loff_t *ppos);
 static ssize_t sock_sendpage(struct file *file, struct page *page,
 			     int offset, size_t size, loff_t *ppos, int more);
+static int sock_kvec_read(struct file *file, kvec_cb_t cb, size_t size, loff_t pos);
+static int sock_kvec_write(struct file *file, kvec_cb_t cb, size_t size, loff_t pos);
 
 
 /*
@@ -123,7 +126,11 @@
 	fasync:		sock_fasync,
 	readv:		sock_readv,
 	writev:		sock_writev,
-	sendpage:	sock_sendpage
+	sendpage:	sock_sendpage,
+	aio_read:	generic_sock_aio_read,
+	aio_write:	generic_file_aio_write,
+	kvec_read:	sock_kvec_read,
+	kvec_write:	sock_kvec_write,
 };
 
 /*
@@ -533,13 +540,14 @@
 static ssize_t sock_read(struct file *file, char *ubuf,
 			 size_t size, loff_t *ppos)
 {
+	int read_flags = 0;
 	struct socket *sock;
 	struct iovec iov;
 	struct msghdr msg;
 	int flags;
 
-	if (ppos != &file->f_pos)
-		return -ESPIPE;
+	if (read_flags & ~F_ATOMIC)
+		return -EINVAL;
 	if (size==0)		/* Match SYS5 behaviour */
 		return 0;
 
@@ -554,6 +562,8 @@
 	iov.iov_base=ubuf;
 	iov.iov_len=size;
 	flags = !(file->f_flags & O_NONBLOCK) ? 0 : MSG_DONTWAIT;
+	if (read_flags & F_ATOMIC)
+		flags |= MSG_DONTWAIT;
 
 	return sock_recvmsg(sock, &msg, size, flags);
 }
@@ -567,12 +577,13 @@
 static ssize_t sock_write(struct file *file, const char *ubuf,
 			  size_t size, loff_t *ppos)
 {
+	int flags = 0;
 	struct socket *sock;
 	struct msghdr msg;
 	struct iovec iov;
-	
-	if (ppos != &file->f_pos)
-		return -ESPIPE;
+
+	if (flags & ~F_ATOMIC)
+		return -EINVAL;
 	if(size==0)		/* Match SYS5 behaviour */
 		return 0;
 
@@ -585,6 +596,8 @@
 	msg.msg_control=NULL;
 	msg.msg_controllen=0;
 	msg.msg_flags=!(file->f_flags & O_NONBLOCK) ? 0 : MSG_DONTWAIT;
+	if (flags & F_ATOMIC)
+		msg.msg_flags = MSG_DONTWAIT;
 	if (sock->type == SOCK_SEQPACKET)
 		msg.msg_flags |= MSG_EOR;
 	iov.iov_base=(void *)ubuf;
@@ -611,6 +624,29 @@
 	return sock->ops->sendpage(sock, page, offset, size, flags);
 }
 
+static int sock_kvec_read(struct file *file, kvec_cb_t cb, size_t size, loff_t pos)
+{
+	struct socket *sock;
+	sock = socki_lookup(file->f_dentry->d_inode);
+	if ((int)size < 0 || (size_t)(int)size != size)
+		return -EINVAL;
+	if (sock->ops->kvec_read)
+		return sock->ops->kvec_read(sock, cb, size);
+	return -EOPNOTSUPP;
+}
+
+static int sock_kvec_write(struct file *file, kvec_cb_t cb, size_t size, loff_t pos)
+{
+	struct socket *sock;
+	sock = socki_lookup(file->f_dentry->d_inode);
+	if ((int)size < 0 || (size_t)(int)size != size)
+		return -EINVAL;
+	if (sock->ops->kvec_write)
+		return sock->ops->kvec_write(sock, cb, size);
+	return -EOPNOTSUPP;
+}
+
+
 int sock_readv_writev(int type, struct inode * inode, struct file * file,
 		      const struct iovec * iov, long count, long size)
 {
diff -urN v2.4.19-pre5/patches/epoll.diff linux.diff/patches/epoll.diff
--- v2.4.19-pre5/patches/epoll.diff	Wed Dec 31 19:00:00 1969
+++ linux.diff/patches/epoll.diff	Tue Apr 30 17:29:54 2002
@@ -0,0 +1,1396 @@
+diff -urN linux/Documentation/Configure.help test/Documentation/Configure.help
+--- linux/Documentation/Configure.help	Tue Apr  2 18:48:12 2002
++++ test/Documentation/Configure.help	Tue Apr 30 17:14:16 2002
+@@ -17983,6 +17983,26 @@
+   contains more information and the location of the joystick package
+   that you'll need.
+ 
++/dev/epoll support
++CONFIG_EVENTPOLL
++  This option will allow for the creation of a '/dev/epoll' character
++  device, with major number 10 (MISC_MAJOR) and minor number 124
++  (EVENTPOLL_MINOR).
++
++  This device can be used to very efficiently handle incoming events on a
++  socket, much more so than select() or poll(). There is a paper that
++  describes this device and how to program for it (as well as including
++  some very impressive benchmarks) at the following URL:
++  http://www.xmailserver.org/linux-patches/nio-improve.html
++
++  If you are writing very scalable servers and wish to code against
++  /dev/epoll for enhanced speed, say 'Y' or 'M' here. If you have
++  software in hand that requires (or can make use of) /dev/epoll,
++  also say 'Y' or 'M' here.
++
++  The vast majority of the planet can very safely say 'N' here
++  and breathe easily.
++
+ Game port support
+ CONFIG_INPUT_GAMEPORT
+   Gameport support is for the standard 15-pin PC gameport.  If you
+diff -urN linux/drivers/char/Config.in test/drivers/char/Config.in
+--- linux/drivers/char/Config.in	Tue Apr  2 18:47:44 2002
++++ test/drivers/char/Config.in	Tue Apr 30 17:14:16 2002
+@@ -220,6 +220,7 @@
+ dep_tristate 'AMD 768 Random Number Generator support' CONFIG_AMD_RNG $CONFIG_PCI
+ dep_tristate 'Intel i8x0 Random Number Generator support' CONFIG_INTEL_RNG $CONFIG_PCI
+ tristate '/dev/nvram support' CONFIG_NVRAM
++tristate '/dev/epoll - Efficient file event polling method' CONFIG_EVENTPOLL
+ tristate 'Enhanced Real Time Clock Support' CONFIG_RTC
+ if [ "$CONFIG_IA64" = "y" ]; then
+    bool 'EFI Real Time Clock Services' CONFIG_EFI_RTC
+diff -urN linux/drivers/char/Makefile test/drivers/char/Makefile
+--- linux/drivers/char/Makefile	Tue Apr  2 18:47:43 2002
++++ test/drivers/char/Makefile	Tue Apr 30 17:14:16 2002
+@@ -208,6 +208,7 @@
+ ifeq ($(CONFIG_PPC),)
+   obj-$(CONFIG_NVRAM) += nvram.o
+ endif
++obj-$(CONFIG_EVENTPOLL) += eventpoll.o
+ obj-$(CONFIG_TOSHIBA) += toshiba.o
+ obj-$(CONFIG_I8K) += i8k.o
+ obj-$(CONFIG_DS1620) += ds1620.o
+diff -urN linux/drivers/char/eventpoll.c test/drivers/char/eventpoll.c
+--- linux/drivers/char/eventpoll.c	Wed Dec 31 19:00:00 1969
++++ test/drivers/char/eventpoll.c	Tue Apr 30 17:14:16 2002
+@@ -0,0 +1,800 @@
++/*
++ *  drivers/char/eventpoll.c
++ *
++ *  Copyright (C) 2001, Davide Libenzi <davidel@xmailserver.org>
++ *
++ *  Efficient event polling implementation
++ */
++
++#include <linux/module.h>
++#include <linux/init.h>
++#include <linux/kernel.h>
++#include <linux/sched.h>
++#include <linux/fs.h>
++#include <linux/file.h>
++#include <linux/signal.h>
++#include <linux/errno.h>
++#include <linux/mm.h>
++#include <linux/vmalloc.h>
++#include <linux/slab.h>
++#include <linux/poll.h>
++#include <linux/miscdevice.h>
++#include <linux/random.h>
++#include <linux/smp_lock.h>
++#include <linux/wrapper.h>
++#include <linux/string.h>
++#include <linux/list.h>
++#include <linux/spinlock.h>
++#include <linux/wait.h>
++#include <linux/fcblist.h>
++#include <asm/bitops.h>
++#include <asm/uaccess.h>
++#include <asm/system.h>
++#include <asm/io.h>
++#include <asm/atomic.h>
++
++#include <linux/eventpoll.h>
++
++
++
++
++
++#define DEBUG	0
++#ifdef DEBUG
++#define DPRINTK(x)	printk x
++#define DNPRINTK(n,x)	if (n <= DEBUG) printk x
++#else
++#define DPRINTK(x)
++#define DNPRINTK(n,x)
++#endif
++
++#define DEBUG_DPI	0
++
++#if DEBUG_DPI
++#define DPI_SLAB_DEBUG	(SLAB_DEBUG_FREE | SLAB_RED_ZONE /* | SLAB_POISON */)
++#else
++#define DPI_SLAB_DEBUG	0
++#endif
++
++#define INITIAL_HASH_BITS	7
++#define MAX_HASH_BITS	18
++#define RESIZE_LENGTH	2
++
++#define dpi_mem_alloc()	(struct epitem *) kmem_cache_alloc(dpi_cache, SLAB_KERNEL)
++#define dpi_mem_free(p) kmem_cache_free(dpi_cache, p)
++
++
++
++
++
++typedef unsigned long long event_version_t;
++
++struct eventpoll {
++	rwlock_t lock;
++	wait_queue_head_t wq;
++	wait_queue_head_t poll_wait;
++	struct list_head *hash;
++	unsigned int hbits;
++	unsigned int hmask;
++	atomic_t hents;
++	atomic_t resize;
++	int numpages;
++	char **pages;
++	char *pages0[MAX_EVENTPOLL_PAGES];
++	char *pages1[MAX_EVENTPOLL_PAGES];
++	atomic_t mmapped;
++	int eventcnt;
++	event_version_t ver;
++};
++
++struct epitem {
++	struct list_head llink;
++	struct eventpoll *ep;
++	struct file *file;
++	struct pollfd pfd;
++	int index;
++	event_version_t ver;
++};
++
++
++
++
++
++
++static int ep_alloc_pages(char **pages, int numpages);
++static int ep_free_pages(char **pages, int numpages);
++static int ep_init(struct eventpoll *ep);
++static void ep_free(struct eventpoll *ep);
++static inline struct epitem *ep_find_nl(struct eventpoll *ep, int fd);
++static struct epitem *ep_find(struct eventpoll *ep, int fd);
++static int ep_hashresize(struct eventpoll *ep, unsigned long *kflags);
++static int ep_insert(struct eventpoll *ep, struct pollfd *pfd);
++static int ep_remove(struct eventpoll *ep, struct epitem *dpi);
++static void notify_proc(struct file *file, void *data, unsigned long *local, long *event);
++static int open_eventpoll(struct inode *inode, struct file *file);
++static int close_eventpoll(struct inode *inode, struct file *file);
++static unsigned int poll_eventpoll(struct file *file, poll_table *wait);
++static int write_eventpoll(struct file *file, const char *buffer, size_t count,
++		loff_t *ppos);
++static int ep_poll(struct eventpoll *ep, void *arg);
++static int ioctl_eventpoll(struct inode *inode, struct file *file,
++		unsigned int cmd, unsigned long arg);
++static void eventpoll_mm_open(struct vm_area_struct * vma);
++static void eventpoll_mm_close(struct vm_area_struct * vma);
++static int mmap_eventpoll(struct file *file, struct vm_area_struct *vma);
++
++
++
++
++static kmem_cache_t *dpi_cache;
++
++static struct file_operations eventpoll_fops = {
++	write: write_eventpoll,
++	ioctl: ioctl_eventpoll,
++	mmap: mmap_eventpoll,
++	open: open_eventpoll,
++	release: close_eventpoll,
++	poll: poll_eventpoll
++};
++
++static struct vm_operations_struct eventpoll_mmap_ops = {
++	open: eventpoll_mm_open,
++	close: eventpoll_mm_close,
++};
++
++static struct miscdevice eventpoll = {
++	EVENTPOLL_MINOR, "eventpoll", &eventpoll_fops
++};
++
++
++
++
++static int ep_alloc_pages(char **pages, int numpages)
++{
++	int ii;
++
++	for (ii = 0; ii < numpages; ii++) {
++		pages[ii] = (char *) __get_free_pages(GFP_KERNEL, 0);
++		if (!pages[ii]) {
++			for (--ii; ii >= 0; ii--) {
++				clear_bit(PG_reserved, &virt_to_page(pages[ii])->flags);
++				free_pages((unsigned long) pages[ii], 0);
++			}
++			return -ENOMEM;
++		}
++		set_bit(PG_reserved, &virt_to_page(pages[ii])->flags);
++	}
++	return 0;
++}
++
++
++static int ep_free_pages(char **pages, int numpages)
++{
++	int ii;
++
++	for (ii = 0; ii < numpages; ii++) {
++		clear_bit(PG_reserved, &virt_to_page(pages[ii])->flags);
++		free_pages((unsigned long) pages[ii], 0);
++	}
++	return 0;
++}
++
++
++static int ep_init(struct eventpoll *ep)
++{
++	int ii, hentries;
++
++	rwlock_init(&ep->lock);
++	init_waitqueue_head(&ep->wq);
++	init_waitqueue_head(&ep->poll_wait);
++	ep->hbits = INITIAL_HASH_BITS;
++	ep->hmask = (1 << ep->hbits) - 1;
++	atomic_set(&ep->hents, 0);
++	atomic_set(&ep->resize, 0);
++	atomic_set(&ep->mmapped, 0);
++	ep->numpages = 0;
++	ep->pages = ep->pages0;
++	ep->eventcnt = 0;
++	ep->ver = 1;
++
++	hentries = ep->hmask + 1;
++	if (!(ep->hash = (struct list_head *) vmalloc(hentries * sizeof(struct list_head))))
++		return -ENOMEM;
++
++	for (ii = 0; ii < hentries; ii++)
++		INIT_LIST_HEAD(&ep->hash[ii]);
++
++	return 0;
++}
++
++
++static void ep_free(struct eventpoll *ep)
++{
++	int ii;
++	struct list_head *lnk;
++
++	lock_kernel();
++	for (ii = 0; ii <= ep->hmask; ii++) {
++		while ((lnk = list_first(&ep->hash[ii]))) {
++			struct epitem *dpi = list_entry(lnk, struct epitem, llink);
++
++			file_notify_delcb(dpi->file, notify_proc);
++			list_del(lnk);
++			dpi_mem_free(dpi);
++		}
++	}
++	vfree(ep->hash);
++	if (ep->numpages > 0) {
++		ep_free_pages(ep->pages0, ep->numpages);
++		ep_free_pages(ep->pages1, ep->numpages);
++	}
++	unlock_kernel();
++}
++
++
++static inline struct epitem *ep_find_nl(struct eventpoll *ep, int fd)
++{
++	struct epitem *dpi = NULL;
++	struct list_head *lsthead, *lnk;
++
++	lsthead = &ep->hash[fd & ep->hmask];
++	list_for_each(lnk, lsthead) {
++		dpi = list_entry(lnk, struct epitem, llink);
++
++		if (dpi->pfd.fd == fd) break;
++		dpi = NULL;
++	}
++
++	DNPRINTK(3, (KERN_INFO "[%p] /dev/epoll: ep_find(%d) -> %p\n", current, fd, dpi));
++
++	return dpi;
++}
++
++
++static struct epitem *ep_find(struct eventpoll *ep, int fd)
++{
++	struct epitem *dpi;
++	unsigned long flags;
++
++	read_lock_irqsave(&ep->lock, flags);
++
++	dpi = ep_find_nl(ep, fd);
++
++	read_unlock_irqrestore(&ep->lock, flags);
++
++	return dpi;
++}
++
++
++static int ep_hashresize(struct eventpoll *ep, unsigned long *kflags)
++{
++	struct list_head *hash, *oldhash;
++	unsigned int hbits = ep->hbits + 1;
++	unsigned int hmask = (1 << hbits) - 1;
++	int ii, res, hentries = hmask + 1;
++	unsigned long flags = *kflags;
++
++	DNPRINTK(3, (KERN_INFO "[%p] /dev/epoll: ep_hashresize(%p) bits=%u\n", current, ep, hbits));
++
++	write_unlock_irqrestore(&ep->lock, flags);
++
++	res = -ENOMEM;
++	if (!(hash = (struct list_head *) vmalloc(hentries * sizeof(struct list_head)))) {
++		write_lock_irqsave(&ep->lock, flags);
++		goto out;
++	}
++
++	for (ii = 0; ii < hentries; ii++)
++		INIT_LIST_HEAD(&hash[ii]);
++
++	write_lock_irqsave(&ep->lock, flags);
++
++	oldhash = ep->hash;
++	for (ii = 0; ii <= ep->hmask; ii++) {
++		struct list_head *oldhead = &oldhash[ii], *lnk;
++
++		while ((lnk = list_first(oldhead))) {
++			struct epitem *dpi = list_entry(lnk, struct epitem, llink);
++
++			list_del(lnk);
++			list_add(lnk, &hash[dpi->pfd.fd & hmask]);
++		}
++	}
++
++	ep->hash = hash;
++	ep->hbits = hbits;
++	ep->hmask = hmask;
++
++	write_unlock_irqrestore(&ep->lock, flags);
++	vfree(oldhash);
++	write_lock_irqsave(&ep->lock, flags);
++
++	res = 0;
++out:
++	*kflags = flags;
++	atomic_dec(&ep->resize);
++	return res;
++}
++
++
++static int ep_insert(struct eventpoll *ep, struct pollfd *pfd)
++{
++	struct epitem *dpi;
++	struct file *file;
++	unsigned long flags;
++
++	if (atomic_read(&ep->hents) >= (ep->numpages * POLLFD_X_PAGE))
++		return -E2BIG;
++
++	if (!(file = fcheck(pfd->fd)))
++		return -EINVAL;
++
++	if (!(dpi = dpi_mem_alloc()))
++		return -ENOMEM;
++
++	INIT_LIST_HEAD(&dpi->llink);
++	dpi->ep = ep;
++	dpi->file = file;
++	dpi->pfd = *pfd;
++	dpi->index = -1;
++	dpi->ver = ep->ver - 1;
++
++	write_lock_irqsave(&ep->lock, flags);
++
++	list_add(&dpi->llink, &ep->hash[pfd->fd & ep->hmask]);
++	atomic_inc(&ep->hents);
++
++	if (!atomic_read(&ep->resize) &&
++			(atomic_read(&ep->hents) >> ep->hbits) > RESIZE_LENGTH &&
++			ep->hbits < MAX_HASH_BITS) {
++		atomic_inc(&ep->resize);
++		ep_hashresize(ep, &flags);
++	}
++
++	write_unlock_irqrestore(&ep->lock, flags);
++
++	file_notify_addcb(file, notify_proc, dpi);
++
++	DNPRINTK(3, (KERN_INFO "[%p] /dev/epoll: ep_insert(%p, %d)\n", current, ep, pfd->fd));
++
++	return 0;
++}
++
++
++static int ep_remove(struct eventpoll *ep, struct epitem *dpi)
++{
++	int fd = dpi->pfd.fd;
++	unsigned long flags;
++	struct pollfd *pfd, *lpfd;
++	struct epitem *ldpi;
++
++	file_notify_delcb(dpi->file, notify_proc);
++
++	write_lock_irqsave(&ep->lock, flags);
++
++	list_del(&dpi->llink);
++	atomic_dec(&ep->hents);
++
++	if (dpi->index >= 0 && dpi->ver == ep->ver && dpi->index < ep->eventcnt) {
++		pfd = (struct pollfd *) (ep->pages[EVENT_PAGE_INDEX(dpi->index)] +
++								 EVENT_PAGE_OFFSET(dpi->index));
++		if (pfd->fd == dpi->pfd.fd && dpi->index < --ep->eventcnt) {
++			lpfd = (struct pollfd *) (ep->pages[EVENT_PAGE_INDEX(ep->eventcnt)] +
++									  EVENT_PAGE_OFFSET(ep->eventcnt));
++			*pfd = *lpfd;
++
++			if ((ldpi = ep_find_nl(ep, pfd->fd))) ldpi->index = dpi->index;
++		}
++	}
++
++	write_unlock_irqrestore(&ep->lock, flags);
++
++	dpi_mem_free(dpi);
++
++	DNPRINTK(3, (KERN_INFO "[%p] /dev/epoll: ep_remove(%p, %d)\n", current, ep, fd));
++
++	return 0;
++}
++
++
++static void notify_proc(struct file *file, void *data, unsigned long *local, long *event)
++{
++	struct epitem *dpi = (struct epitem *) data;
++	struct eventpoll *ep = dpi->ep;
++	struct pollfd *pfd;
++
++	DNPRINTK(3, (KERN_INFO "[%p] /dev/epoll: notify(%p, %p, %ld, %ld) ep=%p\n",
++			current, file, data, event[0], event[1], ep));
++
++	write_lock(&ep->lock);
++	if (!(dpi->pfd.events & event[1]))
++		goto out;
++
++	if (dpi->index < 0 || dpi->ver != ep->ver) {
++		if (ep->eventcnt >= (ep->numpages * POLLFD_X_PAGE))
++			goto out;
++		dpi->index = ep->eventcnt++;
++		dpi->ver = ep->ver;
++		pfd = (struct pollfd *) (ep->pages[EVENT_PAGE_INDEX(dpi->index)] +
++				EVENT_PAGE_OFFSET(dpi->index));
++		*pfd = dpi->pfd;
++	} else {
++		pfd = (struct pollfd *) (ep->pages[EVENT_PAGE_INDEX(dpi->index)] +
++				EVENT_PAGE_OFFSET(dpi->index));
++		if (pfd->fd != dpi->pfd.fd) {
++			if (ep->eventcnt >= (ep->numpages * POLLFD_X_PAGE))
++				goto out;
++			dpi->index = ep->eventcnt++;
++			pfd = (struct pollfd *) (ep->pages[EVENT_PAGE_INDEX(dpi->index)] +
++					EVENT_PAGE_OFFSET(dpi->index));
++			*pfd = dpi->pfd;
++		}
++	}
++
++	pfd->revents |= (pfd->events & event[1]);
++
++	if (waitqueue_active(&ep->wq))
++		wake_up(&ep->wq);
++	if (waitqueue_active(&ep->poll_wait))
++		wake_up(&ep->poll_wait);
++out:
++	write_unlock(&ep->lock);
++}
++
++
++static int open_eventpoll(struct inode *inode, struct file *file)
++{
++	int res;
++	struct eventpoll *ep;
++
++	if (!(ep = kmalloc(sizeof(struct eventpoll), GFP_KERNEL)))
++		return -ENOMEM;
++
++	memset(ep, 0, sizeof(*ep));
++	if ((res = ep_init(ep))) {
++		kfree(ep);
++		return res;
++	}
++
++	file->private_data = ep;
++
++	MOD_INC_USE_COUNT;
++
++	DNPRINTK(3, (KERN_INFO "[%p] /dev/epoll: open() ep=%p\n", current, ep));
++	return 0;
++}
++
++
++static int close_eventpoll(struct inode *inode, struct file *file)
++{
++	struct eventpoll *ep = file->private_data;
++
++	ep_free(ep);
++
++	kfree(ep);
++
++	MOD_DEC_USE_COUNT;
++
++	DNPRINTK(3, (KERN_INFO "[%p] /dev/epoll: close() ep=%p\n", current, ep));
++	return 0;
++}
++
++
++static unsigned int poll_eventpoll(struct file *file, poll_table *wait)
++{
++	struct eventpoll *ep = file->private_data;
++
++	poll_wait(file, &ep->poll_wait, wait);
++	if (ep->eventcnt)
++		return POLLIN | POLLRDNORM;
++
++	return 0;
++}
++
++
++static int write_eventpoll(struct file *file, const char *buffer, size_t count,
++		loff_t *ppos)
++{
++	int res, rcount;
++	struct eventpoll *ep = file->private_data;
++	struct epitem *dpi;
++	struct pollfd pfd;
++
++	DNPRINTK(3, (KERN_INFO "[%p] /dev/epoll: write(%p, %d)\n", current, ep, count));
++
++	if (count % sizeof(struct pollfd))
++		return -EINVAL;
++
++	if ((res = verify_area(VERIFY_READ, buffer, count)))
++		return res;
++
++	rcount = 0;
++
++	lock_kernel();
++
++	while (count > 0) {
++		__copy_from_user(&pfd, buffer, sizeof(pfd));
++
++		dpi = ep_find(ep, pfd.fd);
++
++		if (pfd.fd >= current->files->max_fds || !current->files->fd[pfd.fd])
++			pfd.events = POLLREMOVE;
++		if (pfd.events & POLLREMOVE) {
++			if (dpi) {
++				ep_remove(ep, dpi);
++				rcount += sizeof(pfd);
++			}
++		}
++		else if (dpi) {
++			dpi->pfd.events = pfd.events;
++			rcount += sizeof(pfd);
++		} else {
++			pfd.revents = 0;
++			if (!ep_insert(ep, &pfd))
++				rcount += sizeof(pfd);
++		}
++
++		buffer += sizeof(pfd);
++		count -= sizeof(pfd);
++	}
++
++	unlock_kernel();
++
++	return rcount;
++}
++
++
++static int ep_poll(struct eventpoll *ep, void *arg)
++{
++	int res = 0;
++	long timeout;
++	unsigned long flags;
++	struct evpoll dvp;
++	wait_queue_t wait;
++
++	if (copy_from_user(&dvp, arg, sizeof(struct evpoll)))
++		return -EFAULT;
++
++	if (!atomic_read(&ep->mmapped))
++		return -EINVAL;
++
++	DNPRINTK(3, (KERN_INFO "[%p] /dev/epoll: ioctl(%p, EP_POLL, %d)\n", current, ep, dvp.ep_timeout));
++
++	write_lock_irqsave(&ep->lock, flags);
++
++	res = 0;
++	if (!ep->eventcnt) {
++		init_waitqueue_entry(&wait, current);
++		add_wait_queue(&ep->wq, &wait);
++		timeout = dvp.ep_timeout == -1 || dvp.ep_timeout > MAX_SCHEDULE_TIMEOUT/HZ ?
++			MAX_SCHEDULE_TIMEOUT: (dvp.ep_timeout * HZ) / 1000;
++		for (;;) {
++			if (ep->eventcnt || !timeout)
++				break;
++			if (signal_pending(current)) {
++				res = -EINTR;
++				break;
++			}
++
++			set_current_state(TASK_INTERRUPTIBLE);
++
++			write_unlock_irqrestore(&ep->lock, flags);
++			timeout = schedule_timeout(timeout);
++			write_lock_irqsave(&ep->lock, flags);
++		}
++		remove_wait_queue(&ep->wq, &wait);
++
++		set_current_state(TASK_RUNNING);
++	}
++
++	if (!res && ep->eventcnt) {
++		res = ep->eventcnt;
++		ep->eventcnt = 0;
++		++ep->ver;
++		if (ep->pages == ep->pages0) {
++			ep->pages = ep->pages1;
++			dvp.ep_resoff = 0;
++		} else {
++			ep->pages = ep->pages0;
++			dvp.ep_resoff = ep->numpages * PAGE_SIZE;
++		}
++	}
++
++	write_unlock_irqrestore(&ep->lock, flags);
++
++	if (res > 0)
++		copy_to_user(arg, &dvp, sizeof(struct evpoll));
++
++	DNPRINTK(3, (KERN_INFO "[%p] /dev/epoll: ioctl(%p, EP_POLL, %d) == %d\n", current, ep, dvp.ep_timeout, res));
++	return res;
++}
++
++
++static int ioctl_eventpoll(struct inode *inode, struct file *file,
++		unsigned int cmd, unsigned long arg)
++{
++	int res, numpages;
++	struct eventpoll *ep = file->private_data;
++	struct epitem *dpi;
++	unsigned long flags;
++	struct pollfd pfd;
++
++	switch (cmd) {
++	case EP_ALLOC:
++		if (atomic_read(&ep->mmapped))
++			return -EBUSY;
++
++		numpages = EP_FDS_PAGES(arg);
++		if (numpages > MAX_EVENTPOLL_PAGES)
++			return -EINVAL;
++
++		res = 0;
++		write_lock_irqsave(&ep->lock, flags);
++		if (numpages > ep->numpages) {
++			if (!(res = ep_alloc_pages(&ep->pages0[ep->numpages], numpages - ep->numpages))) {
++				if (!(res = ep_alloc_pages(&ep->pages1[ep->numpages], numpages - ep->numpages))) {
++					ep->numpages = numpages;
++				} else {
++					ep_free_pages(&ep->pages0[ep->numpages], numpages - ep->numpages);
++				}
++			}
++		}
++		write_unlock_irqrestore(&ep->lock, flags);
++
++		DNPRINTK(3, (KERN_INFO "[%p] /dev/epoll: ioctl(%p, EP_ALLOC, %lu) == %d\n",
++					 current, ep, arg, res));
++		return res;
++
++	case EP_FREE:
++		if (atomic_read(&ep->mmapped))
++			return -EBUSY;
++
++		res = -EINVAL;
++		write_lock_irqsave(&ep->lock, flags);
++		if (ep->numpages > 0) {
++			ep_free_pages(ep->pages0, ep->numpages);
++			ep_free_pages(ep->pages1, ep->numpages);
++			ep->numpages = 0;
++			ep->pages = ep->pages0;
++			res = 0;
++		}
++		write_unlock_irqrestore(&ep->lock, flags);
++
++		DNPRINTK(3, (KERN_INFO "[%p] /dev/epoll: ioctl(%p, EP_FREE) == %d\n",
++					 current, ep, res));
++		return res;
++
++	case EP_POLL:
++		return ep_poll(ep, (void *) arg);
++
++	case EP_ISPOLLED:
++		if (copy_from_user(&pfd, (void *) arg, sizeof(struct pollfd)))
++			return 0;
++
++		read_lock_irqsave(&ep->lock, flags);
++
++		res = 0;
++		if (!(dpi = ep_find_nl(ep, pfd.fd)))
++			goto out_ispolled;
++
++		pfd = dpi->pfd;
++		res = 1;
++
++	out_ispolled:
++		read_unlock_irqrestore(&ep->lock, flags);
++
++		if (res)
++			copy_to_user((void *) arg, &pfd, sizeof(struct pollfd));
++
++		DNPRINTK(3, (KERN_INFO "[%p] /dev/epoll: ioctl(%p, EP_ISPOLLED, %d) == %d\n",
++					 current, ep, pfd.fd, res));
++		return res;
++	}
++
++	return -EINVAL;
++}
++
++
++static void eventpoll_mm_open(struct vm_area_struct * vma)
++{
++	struct file *file = vma->vm_file;
++	struct eventpoll *ep = file->private_data;
++
++	if (ep) atomic_inc(&ep->mmapped);
++
++	DNPRINTK(3, (KERN_INFO "[%p] /dev/epoll: mm_open(%p)\n", current, ep));
++}
++
++
++static void eventpoll_mm_close(struct vm_area_struct * vma)
++{
++	struct file *file = vma->vm_file;
++	struct eventpoll *ep = file->private_data;
++
++	if (ep) atomic_dec(&ep->mmapped);
++
++	DNPRINTK(3, (KERN_INFO "[%p] /dev/epoll: mm_close(%p)\n", current, ep));
++}
++
++
++static int mmap_eventpoll(struct file *file, struct vm_area_struct *vma)
++{
++	struct eventpoll *ep = file->private_data;
++	unsigned long start, flags;
++	int ii, res;
++	int numpages;
++	size_t mapsize;
++
++	DNPRINTK(3, (KERN_INFO "[%p] /dev/epoll: mmap(%p, %lx, %lx)\n",
++			current, ep, vma->vm_start, vma->vm_pgoff << PAGE_SHIFT));
++
++	if ((vma->vm_pgoff << PAGE_SHIFT) != 0)
++		return -EINVAL;
++
++	mapsize = PAGE_ALIGN(vma->vm_end - vma->vm_start);
++	numpages = mapsize >> PAGE_SHIFT;
++
++	write_lock_irqsave(&ep->lock, flags);
++
++	res = -EINVAL;
++	if (numpages != (2 * ep->numpages))
++		goto out;
++
++	start = vma->vm_start;
++	for (ii = 0; ii < ep->numpages; ii++) {
++		if (remap_page_range(start, __pa(ep->pages0[ii]),
++				PAGE_SIZE, vma->vm_page_prot))
++    		goto out;
++		start += PAGE_SIZE;
++	}
++	for (ii = 0; ii < ep->numpages; ii++) {
++		if (remap_page_range(start, __pa(ep->pages1[ii]),
++				PAGE_SIZE, vma->vm_page_prot))
++    		goto out;
++		start += PAGE_SIZE;
++	}
++	vma->vm_ops = &eventpoll_mmap_ops;
++	atomic_set(&ep->mmapped, 1);
++	res = 0;
++out:
++	write_unlock_irqrestore(&ep->lock, flags);
++
++	DNPRINTK(3, (KERN_INFO "[%p] /dev/epoll: mmap(%p, %lx, %lx) == %d\n",
++		 	current, ep, vma->vm_start, vma->vm_pgoff << PAGE_SHIFT, res));
++	return res;
++}
++
++
++int __init eventpoll_init(void)
++{
++	dpi_cache = kmem_cache_create("eventpoll",
++			sizeof(struct epitem),
++			__alignof__(struct epitem),
++			DPI_SLAB_DEBUG, NULL, NULL);
++	if (!dpi_cache) {
++		printk(KERN_INFO "[%p] /dev/epoll: driver install failed.\n", current);
++		return -ENOMEM;
++	}
++
++	printk(KERN_INFO "[%p] /dev/epoll: driver installed.\n", current);
++
++	misc_register(&eventpoll);
++
++	return 0;
++}
++
++
++module_init(eventpoll_init);
++
++#ifdef MODULE
++
++void cleanup_module(void)
++{
++	misc_deregister(&eventpoll);
++	kmem_cache_destroy(dpi_cache);
++}
++
++#endif
++
++MODULE_LICENSE("GPL"); 
++
+diff -urN linux/fs/Makefile test/fs/Makefile
+--- linux/fs/Makefile	Tue Apr  2 18:56:58 2002
++++ test/fs/Makefile	Tue Apr 30 17:14:16 2002
+@@ -7,12 +7,12 @@
+ 
+ O_TARGET := fs.o
+ 
+-export-objs :=	filesystems.o open.o dcache.o buffer.o
++export-objs :=	filesystems.o open.o dcache.o buffer.o fcblist.o
+ mod-subdirs :=	nls
+ 
+ obj-y :=	open.o read_write.o devices.o file_table.o buffer.o \
+ 		super.o block_dev.o char_dev.o stat.o exec.o pipe.o namei.o \
+-		fcntl.o ioctl.o readdir.o select.o fifo.o locks.o \
++		fcntl.o ioctl.o readdir.o select.o fifo.o locks.o fcblist.o \
+ 		dcache.o inode.o attr.o bad_inode.o file.o iobuf.o dnotify.o \
+ 		filesystems.o namespace.o seq_file.o
+ 
+diff -urN linux/fs/fcblist.c test/fs/fcblist.c
+--- linux/fs/fcblist.c	Wed Dec 31 19:00:00 1969
++++ test/fs/fcblist.c	Tue Apr 30 17:14:16 2002
+@@ -0,0 +1,130 @@
++/*
++ *  linux/fs/fcblist.c
++ *
++ *  Copyright (C) 2001, Davide Libenzi <davidel@xmailserver.org>
++ *
++ *  Handle file callbacks
++ */
++
++#include <linux/config.h>
++#include <linux/module.h>
++#include <linux/fs.h>
++#include <linux/mm.h>
++#include <linux/sched.h>
++#include <linux/slab.h>
++#include <linux/vmalloc.h>
++#include <linux/poll.h>
++#include <asm/bitops.h>
++#include <linux/fcblist.h>
++
++
++long ion_band_table[NSIGPOLL] = {
++	ION_IN,		/* POLL_IN */
++	ION_OUT,	/* POLL_OUT */
++	ION_IN,		/* POLL_MSG */
++	ION_ERR,	/* POLL_ERR */
++	0,			/* POLL_PRI */
++	ION_HUP		/* POLL_HUP */
++};
++EXPORT_SYMBOL(ion_band_table);
++
++long poll_band_table[NSIGPOLL] = {
++	POLLIN | POLLRDNORM,			/* POLL_IN */
++	POLLOUT | POLLWRNORM | POLLWRBAND,	/* POLL_OUT */
++	POLLIN | POLLRDNORM | POLLMSG,		/* POLL_MSG */
++	POLLERR,				/* POLL_ERR */
++	POLLPRI | POLLRDBAND,			/* POLL_PRI */
++	POLLHUP | POLLERR			/* POLL_HUP */
++};
++EXPORT_SYMBOL(poll_band_table);
++
++
++void file_notify_event(struct file *filep, long *event)
++{
++	unsigned long flags;
++	struct list_head *lnk, *lsthead;
++
++	fcblist_read_lock(filep, flags);
++
++	lsthead = &filep->f_cblist;
++	list_for_each(lnk, lsthead) {
++		struct fcb_struct *fcbp = list_entry(lnk, struct fcb_struct, llink);
++
++		fcbp->cbproc(filep, fcbp->data, fcbp->local, event);
++	}
++
++	fcblist_read_unlock(filep, flags);
++}
++EXPORT_SYMBOL(file_notify_event);
++
++
++int file_notify_addcb(struct file *filep,
++		void (*cbproc)(struct file *, void *, unsigned long *, long *), void *data)
++{
++	unsigned long flags;
++	struct fcb_struct *fcbp;
++
++	if (!(fcbp = (struct fcb_struct *) kmalloc(sizeof(struct fcb_struct), GFP_KERNEL)))
++		return -ENOMEM;
++
++	memset(fcbp, 0, sizeof(struct fcb_struct));
++	fcbp->cbproc = cbproc;
++	fcbp->data = data;
++
++	fcblist_write_lock(filep, flags);
++	list_add_tail(&fcbp->llink, &filep->f_cblist);
++	fcblist_write_unlock(filep, flags);
++
++	return 0;
++}
++EXPORT_SYMBOL(file_notify_addcb);
++
++
++int file_notify_delcb(struct file *filep,
++		void (*cbproc)(struct file *, void *, unsigned long *, long *))
++{
++	unsigned long flags;
++	struct list_head *lnk, *lsthead;
++
++	fcblist_write_lock(filep, flags);
++
++	lsthead = &filep->f_cblist;
++	list_for_each(lnk, lsthead) {
++		struct fcb_struct *fcbp = list_entry(lnk, struct fcb_struct, llink);
++
++		if (fcbp->cbproc == cbproc) {
++			list_del(lnk);
++			fcblist_write_unlock(filep, flags);
++			kfree(fcbp);
++			return 0;
++		}
++	}
++
++	fcblist_write_unlock(filep, flags);
++
++	return -ENOENT;
++}
++EXPORT_SYMBOL(file_notify_delcb);
++
++
++void file_notify_cleanup(struct file *filep)
++{
++	unsigned long flags;
++	struct list_head *lnk, *lsthead;
++
++	fcblist_write_lock(filep, flags);
++
++	lsthead = &filep->f_cblist;
++	while ((lnk = list_first(lsthead))) {
++		struct fcb_struct *fcbp = list_entry(lnk, struct fcb_struct, llink);
++
++		list_del(lnk);
++		fcblist_write_unlock(filep, flags);
++		kfree(fcbp);
++		fcblist_write_lock(filep, flags);
++	}
++
++	fcblist_write_unlock(filep, flags);
++}
++EXPORT_SYMBOL(file_notify_cleanup);
++
+diff -urN linux/fs/file_table.c test/fs/file_table.c
+--- linux/fs/file_table.c	Tue Apr  2 18:56:57 2002
++++ test/fs/file_table.c	Tue Apr 30 17:23:00 2002
+@@ -8,6 +8,7 @@
+ #include <linux/string.h>
+ #include <linux/slab.h>
+ #include <linux/file.h>
++#include <linux/fcblist.h>
+ #include <linux/init.h>
+ #include <linux/module.h>
+ #include <linux/smp_lock.h>
+@@ -47,6 +48,7 @@
+ 		f->f_uid = current->fsuid;
+ 		f->f_gid = current->fsgid;
+ 		list_add(&f->f_list, &anon_list);
++		file_notify_init(f);
+ 		file_list_unlock();
+ 		return f;
+ 	}
+@@ -91,6 +93,7 @@
+ 	filp->f_uid    = current->fsuid;
+ 	filp->f_gid    = current->fsgid;
+ 	filp->f_op     = dentry->d_inode->i_fop;
++	file_notify_init(filp);
+ 	if (filp->f_op->open)
+ 		return filp->f_op->open(dentry->d_inode, filp);
+ 	else
+@@ -109,6 +112,7 @@
+ 	struct vfsmount * mnt = file->f_vfsmnt;
+ 	struct inode * inode = dentry->d_inode;
+ 
++	file_notify_cleanup(file);
+ 	locks_remove_flock(file);
+ 
+ 	if (file->f_iobuf)
+diff -urN linux/fs/pipe.c test/fs/pipe.c
+--- linux/fs/pipe.c	Tue Apr  2 18:47:24 2002
++++ test/fs/pipe.c	Tue Apr 30 17:14:16 2002
+@@ -10,6 +10,7 @@
+ #include <linux/slab.h>
+ #include <linux/module.h>
+ #include <linux/init.h>
++#include <linux/fcblist.h>
+ 
+ #include <asm/uaccess.h>
+ #include <asm/ioctls.h>
+@@ -40,6 +41,7 @@
+ pipe_read(struct file *filp, char *buf, size_t count, loff_t *ppos)
+ {
+ 	struct inode *inode = filp->f_dentry->d_inode;
++	int pfull;
+ 	ssize_t size, read, ret;
+ 
+ 	/* Seeks are not allowed on pipes.  */
+@@ -72,6 +74,7 @@
+ 			PIPE_WAITING_READERS(*inode)++;
+ 			pipe_wait(inode);
+ 			PIPE_WAITING_READERS(*inode)--;
++			pfull = PIPE_FULL(*inode);
+ 			ret = -ERESTARTSYS;
+ 			if (signal_pending(current))
+ 				goto out;
+@@ -82,6 +85,8 @@
+ 				goto out;
+ 		}
+ 	}
++	else
++		pfull = PIPE_FULL(*inode);
+ 
+ 	/* Read what data is available.  */
+ 	ret = -EFAULT;
+@@ -104,6 +109,9 @@
+ 		count -= chars;
+ 		buf += chars;
+ 	}
++	/* Send notification message */
++	if (pfull && !PIPE_FULL(*inode) && PIPE_WRITEFILE(*inode))
++		file_send_notify(PIPE_WRITEFILE(*inode), ION_OUT, POLLOUT | POLLWRNORM | POLLWRBAND);
+ 
+ 	/* Cache behaviour optimization */
+ 	if (!PIPE_LEN(*inode))
+@@ -138,6 +146,7 @@
+ pipe_write(struct file *filp, const char *buf, size_t count, loff_t *ppos)
+ {
+ 	struct inode *inode = filp->f_dentry->d_inode;
++	int pempty;
+ 	ssize_t free, written, ret;
+ 
+ 	/* Seeks are not allowed on pipes.  */
+@@ -182,6 +191,7 @@
+ 	}
+ 
+ 	/* Copy into available space.  */
++	pempty = PIPE_EMPTY(*inode);
+ 	ret = -EFAULT;
+ 	while (count > 0) {
+ 		int space;
+@@ -210,6 +220,9 @@
+ 			break;
+ 
+ 		do {
++			/* Send notification message */
++			if (pempty && !PIPE_EMPTY(*inode) && PIPE_READFILE(*inode))
++				file_send_notify(PIPE_READFILE(*inode), ION_IN, POLLIN | POLLRDNORM);
+ 			/*
+ 			 * Synchronous wake-up: it knows that this process
+ 			 * is going to give up this CPU, so it doesnt have
+@@ -219,6 +232,7 @@
+ 			PIPE_WAITING_WRITERS(*inode)++;
+ 			pipe_wait(inode);
+ 			PIPE_WAITING_WRITERS(*inode)--;
++			pempty = PIPE_EMPTY(*inode);
+ 			if (signal_pending(current))
+ 				goto out;
+ 			if (!PIPE_READERS(*inode))
+@@ -227,6 +241,9 @@
+ 		ret = -EFAULT;
+ 	}
+ 
++	/* Send notification message */
++	if (pempty && !PIPE_EMPTY(*inode) && PIPE_READFILE(*inode))
++		file_send_notify(PIPE_READFILE(*inode), ION_IN, POLLIN | POLLRDNORM);
+ 	/* Signal readers asynchronously that there is more data.  */
+ 	wake_up_interruptible(PIPE_WAIT(*inode));
+ 
+@@ -299,9 +316,22 @@
+ static int
+ pipe_release(struct inode *inode, int decr, int decw)
+ {
++	struct file *rdfile, *wrfile;
+ 	down(PIPE_SEM(*inode));
+ 	PIPE_READERS(*inode) -= decr;
+ 	PIPE_WRITERS(*inode) -= decw;
++	rdfile = PIPE_READFILE(*inode);
++	wrfile = PIPE_WRITEFILE(*inode);
++ 	if (decr && !PIPE_READERS(*inode)) {
++		PIPE_READFILE(*inode) = NULL;
++		if (wrfile)
++			file_send_notify(wrfile, ION_HUP, POLLHUP);
++	}
++	if (decw && !PIPE_WRITERS(*inode)) {
++		PIPE_WRITEFILE(*inode) = NULL;
++		if (rdfile)
++			file_send_notify(rdfile, ION_HUP, POLLHUP);
++	}
+ 	if (!PIPE_READERS(*inode) && !PIPE_WRITERS(*inode)) {
+ 		struct pipe_inode_info *info = inode->i_pipe;
+ 		inode->i_pipe = NULL;
+@@ -454,6 +484,7 @@
+ 	PIPE_READERS(*inode) = PIPE_WRITERS(*inode) = 0;
+ 	PIPE_WAITING_READERS(*inode) = PIPE_WAITING_WRITERS(*inode) = 0;
+ 	PIPE_RCOUNTER(*inode) = PIPE_WCOUNTER(*inode) = 1;
++	PIPE_READFILE(*inode) = PIPE_WRITEFILE(*inode) = NULL;
+ 
+ 	return inode;
+ fail_page:
+@@ -561,6 +592,9 @@
+ 	f2->f_mode = 2;
+ 	f2->f_version = 0;
+ 
++	PIPE_READFILE(*inode) = f1;
++	PIPE_WRITEFILE(*inode) = f2;
++
+ 	fd_install(i, f1);
+ 	fd_install(j, f2);
+ 	fd[0] = i;
+diff -urN linux/include/asm-i386/poll.h test/include/asm-i386/poll.h
+--- linux/include/asm-i386/poll.h	Tue Apr  2 18:47:30 2002
++++ test/include/asm-i386/poll.h	Tue Apr 30 17:14:16 2002
+@@ -15,6 +15,7 @@
+ #define POLLWRNORM	0x0100
+ #define POLLWRBAND	0x0200
+ #define POLLMSG		0x0400
++#define POLLREMOVE	0x1000
+ 
+ struct pollfd {
+ 	int fd;
+diff -urN linux/include/linux/eventpoll.h test/include/linux/eventpoll.h
+--- linux/include/linux/eventpoll.h	Wed Dec 31 19:00:00 1969
++++ test/include/linux/eventpoll.h	Tue Apr 30 17:14:16 2002
+@@ -0,0 +1,43 @@
++/*
++ *  include/linux/eventpoll.h
++ *
++ *  Copyright (C) 2001, Davide Libenzi <davidel@xmailserver.org>
++ *
++ *  Efficient event polling implementation
++ */
++
++
++#ifndef _LINUX_EVENTPOLL_H
++#define _LINUX_EVENTPOLL_H
++
++
++
++
++#define EVENTPOLL_MINOR	124
++#define POLLFD_X_PAGE	(PAGE_SIZE / sizeof(struct pollfd))
++#define MAX_FDS_IN_EVENTPOLL	(1024 * 128)
++#define MAX_EVENTPOLL_PAGES	(MAX_FDS_IN_EVENTPOLL / POLLFD_X_PAGE)
++#define EVENT_PAGE_INDEX(n)	((n) / POLLFD_X_PAGE)
++#define EVENT_PAGE_REM(n)	((n) % POLLFD_X_PAGE)
++#define EVENT_PAGE_OFFSET(n)	(((n) % POLLFD_X_PAGE) * sizeof(struct pollfd))
++#define EP_FDS_PAGES(n)	(((n) + POLLFD_X_PAGE - 1) / POLLFD_X_PAGE)
++#define EP_MAP_SIZE(n)	(EP_FDS_PAGES(n) * PAGE_SIZE * 2)
++
++
++
++
++
++struct evpoll {
++	int ep_timeout;
++	unsigned long ep_resoff;
++};
++
++#define EP_ALLOC	_IOR('P', 1, int)
++#define EP_POLL		_IOWR('P', 2, struct evpoll)
++#define EP_FREE		_IO('P', 3)
++#define EP_ISPOLLED	_IOWR('P', 4, struct pollfd)
++
++
++
++#endif
++
+diff -urN linux/include/linux/fcblist.h test/include/linux/fcblist.h
+--- linux/include/linux/fcblist.h	Wed Dec 31 19:00:00 1969
++++ test/include/linux/fcblist.h	Tue Apr 30 17:14:16 2002
+@@ -0,0 +1,67 @@
++/*
++ *  include/linux/fcblist.h
++ *
++ *  Copyright (C) 2001, Davide Libenzi <davidel@xmailserver.org>
++ *
++ *  Handle file callbacks
++ */
++
++#ifndef __LINUX_FCBLIST_H
++#define __LINUX_FCBLIST_H
++
++#include <linux/config.h>
++#include <linux/list.h>
++#include <linux/spinlock.h>
++#include <linux/file.h>
++
++
++
++/* file callback notification events */
++#define ION_IN		1
++#define ION_OUT		2
++#define ION_HUP		3
++#define ION_ERR		4
++
++#define FCB_LOCAL_SIZE	4
++
++#define fcblist_read_lock(fp, fl)		read_lock_irqsave(&(fp)->f_cblock, fl)
++#define fcblist_read_unlock(fp, fl)		read_unlock_irqrestore(&(fp)->f_cblock, fl)
++#define fcblist_write_lock(fp, fl)		write_lock_irqsave(&(fp)->f_cblock, fl)
++#define fcblist_write_unlock(fp, fl)	write_unlock_irqrestore(&(fp)->f_cblock, fl)
++
++struct fcb_struct {
++	struct list_head llink;
++	void (*cbproc)(struct file *, void *, unsigned long *, long *);
++	void *data;
++	unsigned long local[FCB_LOCAL_SIZE];
++};
++
++
++extern long ion_band_table[];
++extern long poll_band_table[];
++
++
++void file_notify_event(struct file *filep, long *event);
++
++int file_notify_addcb(struct file *filep,
++		void (*cbproc)(struct file *, void *, unsigned long *, long *), void *data);
++
++int file_notify_delcb(struct file *filep,
++		void (*cbproc)(struct file *, void *, unsigned long *, long *));
++
++void file_notify_cleanup(struct file *filep);
++
++
++static inline void file_notify_init(struct file *filep)
++{
++	rwlock_init(&filep->f_cblock);
++	INIT_LIST_HEAD(&filep->f_cblist);
++}
++
++static inline void file_send_notify(struct file *filep, long ioevt, long plevt) {
++	long event[] = { ioevt, plevt, -1 };
++
++	file_notify_event(filep, event);
++}
++
++#endif
+diff -urN linux/include/linux/fs.h test/include/linux/fs.h
+--- linux/include/linux/fs.h	Mon Apr 29 17:57:06 2002
++++ test/include/linux/fs.h	Tue Apr 30 17:14:16 2002
+@@ -538,6 +538,10 @@
+ 	/* needed for tty driver, and maybe others */
+ 	void			*private_data;
+ 
++	/* file callback list */
++	rwlock_t f_cblock;
++	struct list_head f_cblist;
++
+ 	/* preallocated helper kiobuf to speedup O_DIRECT */
+ 	struct kiobuf		*f_iobuf;
+ 	long			f_iobuf_lock;
+diff -urN linux/include/linux/list.h test/include/linux/list.h
+--- linux/include/linux/list.h	Tue Apr  2 19:14:27 2002
++++ test/include/linux/list.h	Tue Apr 30 17:14:16 2002
+@@ -172,6 +172,11 @@
+         	pos = pos->prev, prefetch(pos->prev))
+         	
+ 
++#define list_first(head)	(((head)->next != (head)) ? (head)->next: (struct list_head *) 0)
++#define list_last(head)	(((head)->prev != (head)) ? (head)->prev: (struct list_head *) 0)
++#define list_next(pos, head)	(((pos)->next != (head)) ? (pos)->next: (struct list_head *) 0)
++#define list_prev(pos, head)	(((pos)->prev != (head)) ? (pos)->prev: (struct list_head *) 0)
++
+ #endif /* __KERNEL__ || _LVM_H_INCLUDE */
+ 
+ #endif
+diff -urN linux/include/linux/pipe_fs_i.h test/include/linux/pipe_fs_i.h
+--- linux/include/linux/pipe_fs_i.h	Tue Apr  2 18:47:28 2002
++++ test/include/linux/pipe_fs_i.h	Tue Apr 30 17:14:16 2002
+@@ -13,6 +13,8 @@
+ 	unsigned int waiting_writers;
+ 	unsigned int r_counter;
+ 	unsigned int w_counter;
++	struct file *rdfile;
++	struct file *wrfile;
+ };
+ 
+ /* Differs from PIPE_BUF in that PIPE_SIZE is the length of the actual
+@@ -30,6 +32,8 @@
+ #define PIPE_WAITING_WRITERS(inode)	((inode).i_pipe->waiting_writers)
+ #define PIPE_RCOUNTER(inode)	((inode).i_pipe->r_counter)
+ #define PIPE_WCOUNTER(inode)	((inode).i_pipe->w_counter)
++#define PIPE_READFILE(inode)	((inode).i_pipe->rdfile)
++#define PIPE_WRITEFILE(inode)	((inode).i_pipe->wrfile)
+ 
+ #define PIPE_EMPTY(inode)	(PIPE_LEN(inode) == 0)
+ #define PIPE_FULL(inode)	(PIPE_LEN(inode) == PIPE_SIZE)
+diff -urN linux/include/net/sock.h test/include/net/sock.h
+--- linux/include/net/sock.h	Mon Apr 29 17:57:17 2002
++++ test/include/net/sock.h	Tue Apr 30 17:27:00 2002
+@@ -105,6 +105,10 @@
+ 
+ #include <asm/atomic.h>
+ #include <net/dst.h>
++#include <linux/fs.h>
++#include <linux/file.h>
++#include <linux/fcblist.h>
++
+ #include <linux/aio.h>
+ 
+ 
+@@ -1227,8 +1231,13 @@
+ 
+ static inline void sk_wake_async(struct sock *sk, int how, int band)
+ {
+-	if (sk->socket && sk->socket->fasync_list)
++	if (sk->socket) {
++		if (sk->socket->file)
++			file_send_notify(sk->socket->file, ion_band_table[band - POLL_IN],
++					poll_band_table[band - POLL_IN]);
++		if (sk->socket->fasync_list)
+ 		sock_wake_async(sk->socket, how, band);
++	}
+ }
+ 
+ #define SOCK_MIN_SNDBUF 2048
+diff -urN linux/net/ipv4/tcp.c test/net/ipv4/tcp.c
+--- linux/net/ipv4/tcp.c	Thu Apr 11 16:17:49 2002
++++ test/net/ipv4/tcp.c	Tue Apr 30 17:14:16 2002
+@@ -471,8 +471,8 @@
+ 		if (sk->sleep && waitqueue_active(sk->sleep))
+ 			wake_up_interruptible(sk->sleep);
+ 
+-		if (sock->fasync_list && !(sk->shutdown&SEND_SHUTDOWN))
+-			sock_wake_async(sock, 2, POLL_OUT);
++		if (!(sk->shutdown&SEND_SHUTDOWN))
++			sk_wake_async(sk, 2, POLL_OUT);
+ 	}
+ }
+ 
diff -urN v2.4.19-pre5/ulib/COPYING linux.diff/ulib/COPYING
--- v2.4.19-pre5/ulib/COPYING	Wed Dec 31 19:00:00 1969
+++ linux.diff/ulib/COPYING	Fri Apr 19 20:54:05 2002
@@ -0,0 +1,515 @@
+
+                  GNU LESSER GENERAL PUBLIC LICENSE
+                       Version 2.1, February 1999
+
+ Copyright (C) 1991, 1999 Free Software Foundation, Inc.
+     59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ Everyone is permitted to copy and distribute verbatim copies
+ of this license document, but changing it is not allowed.
+
+[This is the first released version of the Lesser GPL.  It also counts
+ as the successor of the GNU Library Public License, version 2, hence
+ the version number 2.1.]
+
+                            Preamble
+
+  The licenses for most software are designed to take away your
+freedom to share and change it.  By contrast, the GNU General Public
+Licenses are intended to guarantee your freedom to share and change
+free software--to make sure the software is free for all its users.
+
+  This license, the Lesser General Public License, applies to some
+specially designated software packages--typically libraries--of the
+Free Software Foundation and other authors who decide to use it.  You
+can use it too, but we suggest you first think carefully about whether
+this license or the ordinary General Public License is the better
+strategy to use in any particular case, based on the explanations
+below.
+
+  When we speak of free software, we are referring to freedom of use,
+not price.  Our General Public Licenses are designed to make sure that
+you have the freedom to distribute copies of free software (and charge
+for this service if you wish); that you receive source code or can get
+it if you want it; that you can change the software and use pieces of
+it in new free programs; and that you are informed that you can do
+these things.
+
+  To protect your rights, we need to make restrictions that forbid
+distributors to deny you these rights or to ask you to surrender these
+rights.  These restrictions translate to certain responsibilities for
+you if you distribute copies of the library or if you modify it.
+
+  For example, if you distribute copies of the library, whether gratis
+or for a fee, you must give the recipients all the rights that we gave
+you.  You must make sure that they, too, receive or can get the source
+code.  If you link other code with the library, you must provide
+complete object files to the recipients, so that they can relink them
+with the library after making changes to the library and recompiling
+it.  And you must show them these terms so they know their rights.
+
+  We protect your rights with a two-step method: (1) we copyright the
+library, and (2) we offer you this license, which gives you legal
+permission to copy, distribute and/or modify the library.
+
+  To protect each distributor, we want to make it very clear that
+there is no warranty for the free library.  Also, if the library is
+modified by someone else and passed on, the recipients should know
+that what they have is not the original version, so that the original
+author's reputation will not be affected by problems that might be
+introduced by others.
+^L
+  Finally, software patents pose a constant threat to the existence of
+any free program.  We wish to make sure that a company cannot
+effectively restrict the users of a free program by obtaining a
+restrictive license from a patent holder.  Therefore, we insist that
+any patent license obtained for a version of the library must be
+consistent with the full freedom of use specified in this license.
+
+  Most GNU software, including some libraries, is covered by the
+ordinary GNU General Public License.  This license, the GNU Lesser
+General Public License, applies to certain designated libraries, and
+is quite different from the ordinary General Public License.  We use
+this license for certain libraries in order to permit linking those
+libraries into non-free programs.
+
+  When a program is linked with a library, whether statically or using
+a shared library, the combination of the two is legally speaking a
+combined work, a derivative of the original library.  The ordinary
+General Public License therefore permits such linking only if the
+entire combination fits its criteria of freedom.  The Lesser General
+Public License permits more lax criteria for linking other code with
+the library.
+
+  We call this license the "Lesser" General Public License because it
+does Less to protect the user's freedom than the ordinary General
+Public License.  It also provides other free software developers Less
+of an advantage over competing non-free programs.  These disadvantages
+are the reason we use the ordinary General Public License for many
+libraries.  However, the Lesser license provides advantages in certain
+special circumstances.
+
+  For example, on rare occasions, there may be a special need to
+encourage the widest possible use of a certain library, so that it
+becomes
+a de-facto standard.  To achieve this, non-free programs must be
+allowed to use the library.  A more frequent case is that a free
+library does the same job as widely used non-free libraries.  In this
+case, there is little to gain by limiting the free library to free
+software only, so we use the Lesser General Public License.
+
+  In other cases, permission to use a particular library in non-free
+programs enables a greater number of people to use a large body of
+free software.  For example, permission to use the GNU C Library in
+non-free programs enables many more people to use the whole GNU
+operating system, as well as its variant, the GNU/Linux operating
+system.
+
+  Although the Lesser General Public License is Less protective of the
+users' freedom, it does ensure that the user of a program that is
+linked with the Library has the freedom and the wherewithal to run
+that program using a modified version of the Library.
+
+  The precise terms and conditions for copying, distribution and
+modification follow.  Pay close attention to the difference between a
+"work based on the library" and a "work that uses the library".  The
+former contains code derived from the library, whereas the latter must
+be combined with the library in order to run.
+^L
+                  GNU LESSER GENERAL PUBLIC LICENSE
+   TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION
+
+  0. This License Agreement applies to any software library or other
+program which contains a notice placed by the copyright holder or
+other authorized party saying it may be distributed under the terms of
+this Lesser General Public License (also called "this License").
+Each licensee is addressed as "you".
+
+  A "library" means a collection of software functions and/or data
+prepared so as to be conveniently linked with application programs
+(which use some of those functions and data) to form executables.
+
+  The "Library", below, refers to any such software library or work
+which has been distributed under these terms.  A "work based on the
+Library" means either the Library or any derivative work under
+copyright law: that is to say, a work containing the Library or a
+portion of it, either verbatim or with modifications and/or translated
+straightforwardly into another language.  (Hereinafter, translation is
+included without limitation in the term "modification".)
+
+  "Source code" for a work means the preferred form of the work for
+making modifications to it.  For a library, complete source code means
+all the source code for all modules it contains, plus any associated
+interface definition files, plus the scripts used to control
+compilation
+and installation of the library.
+
+  Activities other than copying, distribution and modification are not
+covered by this License; they are outside its scope.  The act of
+running a program using the Library is not restricted, and output from
+such a program is covered only if its contents constitute a work based
+on the Library (independent of the use of the Library in a tool for
+writing it).  Whether that is true depends on what the Library does
+and what the program that uses the Library does.
+
+  1. You may copy and distribute verbatim copies of the Library's
+complete source code as you receive it, in any medium, provided that
+you conspicuously and appropriately publish on each copy an
+appropriate copyright notice and disclaimer of warranty; keep intact
+all the notices that refer to this License and to the absence of any
+warranty; and distribute a copy of this License along with the
+Library.
+
+  You may charge a fee for the physical act of transferring a copy,
+and you may at your option offer warranty protection in exchange for a
+fee.
+
+  2. You may modify your copy or copies of the Library or any portion
+of it, thus forming a work based on the Library, and copy and
+distribute such modifications or work under the terms of Section 1
+above, provided that you also meet all of these conditions:
+
+    a) The modified work must itself be a software library.
+
+    b) You must cause the files modified to carry prominent notices
+    stating that you changed the files and the date of any change.
+
+    c) You must cause the whole of the work to be licensed at no
+    charge to all third parties under the terms of this License.
+
+    d) If a facility in the modified Library refers to a function or a
+    table of data to be supplied by an application program that uses
+    the facility, other than as an argument passed when the facility
+    is invoked, then you must make a good faith effort to ensure that,
+    in the event an application does not supply such function or
+    table, the facility still operates, and performs whatever part of
+    its purpose remains meaningful.
+
+    (For example, a function in a library to compute square roots has
+    a purpose that is entirely well-defined independent of the
+    application.  Therefore, Subsection 2d requires that any
+    application-supplied function or table used by this function must
+    be optional: if the application does not supply it, the square
+    root function must still compute square roots.)
+
+These requirements apply to the modified work as a whole.  If
+identifiable sections of that work are not derived from the Library,
+and can be reasonably considered independent and separate works in
+themselves, then this License, and its terms, do not apply to those
+sections when you distribute them as separate works.  But when you
+distribute the same sections as part of a whole which is a work based
+on the Library, the distribution of the whole must be on the terms of
+this License, whose permissions for other licensees extend to the
+entire whole, and thus to each and every part regardless of who wrote
+it.
+
+Thus, it is not the intent of this section to claim rights or contest
+your rights to work written entirely by you; rather, the intent is to
+exercise the right to control the distribution of derivative or
+collective works based on the Library.
+
+In addition, mere aggregation of another work not based on the Library
+with the Library (or with a work based on the Library) on a volume of
+a storage or distribution medium does not bring the other work under
+the scope of this License.
+
+  3. You may opt to apply the terms of the ordinary GNU General Public
+License instead of this License to a given copy of the Library.  To do
+this, you must alter all the notices that refer to this License, so
+that they refer to the ordinary GNU General Public License, version 2,
+instead of to this License.  (If a newer version than version 2 of the
+ordinary GNU General Public License has appeared, then you can specify
+that version instead if you wish.)  Do not make any other change in
+these notices.
+^L
+  Once this change is made in a given copy, it is irreversible for
+that copy, so the ordinary GNU General Public License applies to all
+subsequent copies and derivative works made from that copy.
+
+  This option is useful when you wish to copy part of the code of
+the Library into a program that is not a library.
+
+  4. You may copy and distribute the Library (or a portion or
+derivative of it, under Section 2) in object code or executable form
+under the terms of Sections 1 and 2 above provided that you accompany
+it with the complete corresponding machine-readable source code, which
+must be distributed under the terms of Sections 1 and 2 above on a
+medium customarily used for software interchange.
+
+  If distribution of object code is made by offering access to copy
+from a designated place, then offering equivalent access to copy the
+source code from the same place satisfies the requirement to
+distribute the source code, even though third parties are not
+compelled to copy the source along with the object code.
+
+  5. A program that contains no derivative of any portion of the
+Library, but is designed to work with the Library by being compiled or
+linked with it, is called a "work that uses the Library".  Such a
+work, in isolation, is not a derivative work of the Library, and
+therefore falls outside the scope of this License.
+
+  However, linking a "work that uses the Library" with the Library
+creates an executable that is a derivative of the Library (because it
+contains portions of the Library), rather than a "work that uses the
+library".  The executable is therefore covered by this License.
+Section 6 states terms for distribution of such executables.
+
+  When a "work that uses the Library" uses material from a header file
+that is part of the Library, the object code for the work may be a
+derivative work of the Library even though the source code is not.
+Whether this is true is especially significant if the work can be
+linked without the Library, or if the work is itself a library.  The
+threshold for this to be true is not precisely defined by law.
+
+  If such an object file uses only numerical parameters, data
+structure layouts and accessors, and small macros and small inline
+functions (ten lines or less in length), then the use of the object
+file is unrestricted, regardless of whether it is legally a derivative
+work.  (Executables containing this object code plus portions of the
+Library will still fall under Section 6.)
+
+  Otherwise, if the work is a derivative of the Library, you may
+distribute the object code for the work under the terms of Section 6.
+Any executables containing that work also fall under Section 6,
+whether or not they are linked directly with the Library itself.
+^L
+  6. As an exception to the Sections above, you may also combine or
+link a "work that uses the Library" with the Library to produce a
+work containing portions of the Library, and distribute that work
+under terms of your choice, provided that the terms permit
+modification of the work for the customer's own use and reverse
+engineering for debugging such modifications.
+
+  You must give prominent notice with each copy of the work that the
+Library is used in it and that the Library and its use are covered by
+this License.  You must supply a copy of this License.  If the work
+during execution displays copyright notices, you must include the
+copyright notice for the Library among them, as well as a reference
+directing the user to the copy of this License.  Also, you must do one
+of these things:
+
+    a) Accompany the work with the complete corresponding
+    machine-readable source code for the Library including whatever
+    changes were used in the work (which must be distributed under
+    Sections 1 and 2 above); and, if the work is an executable linked
+    with the Library, with the complete machine-readable "work that
+    uses the Library", as object code and/or source code, so that the
+    user can modify the Library and then relink to produce a modified
+    executable containing the modified Library.  (It is understood
+    that the user who changes the contents of definitions files in the
+    Library will not necessarily be able to recompile the application
+    to use the modified definitions.)
+
+    b) Use a suitable shared library mechanism for linking with the
+    Library.  A suitable mechanism is one that (1) uses at run time a
+    copy of the library already present on the user's computer system,
+    rather than copying library functions into the executable, and (2)
+    will operate properly with a modified version of the library, if
+    the user installs one, as long as the modified version is
+    interface-compatible with the version that the work was made with.
+
+    c) Accompany the work with a written offer, valid for at
+    least three years, to give the same user the materials
+    specified in Subsection 6a, above, for a charge no more
+    than the cost of performing this distribution.
+
+    d) If distribution of the work is made by offering access to copy
+    from a designated place, offer equivalent access to copy the above
+    specified materials from the same place.
+
+    e) Verify that the user has already received a copy of these
+    materials or that you have already sent this user a copy.
+
+  For an executable, the required form of the "work that uses the
+Library" must include any data and utility programs needed for
+reproducing the executable from it.  However, as a special exception,
+the materials to be distributed need not include anything that is
+normally distributed (in either source or binary form) with the major
+components (compiler, kernel, and so on) of the operating system on
+which the executable runs, unless that component itself accompanies
+the executable.
+
+  It may happen that this requirement contradicts the license
+restrictions of other proprietary libraries that do not normally
+accompany the operating system.  Such a contradiction means you cannot
+use both them and the Library together in an executable that you
+distribute.
+^L
+  7. You may place library facilities that are a work based on the
+Library side-by-side in a single library together with other library
+facilities not covered by this License, and distribute such a combined
+library, provided that the separate distribution of the work based on
+the Library and of the other library facilities is otherwise
+permitted, and provided that you do these two things:
+
+    a) Accompany the combined library with a copy of the same work
+    based on the Library, uncombined with any other library
+    facilities.  This must be distributed under the terms of the
+    Sections above.
+
+    b) Give prominent notice with the combined library of the fact
+    that part of it is a work based on the Library, and explaining
+    where to find the accompanying uncombined form of the same work.
+
+  8. You may not copy, modify, sublicense, link with, or distribute
+the Library except as expressly provided under this License.  Any
+attempt otherwise to copy, modify, sublicense, link with, or
+distribute the Library is void, and will automatically terminate your
+rights under this License.  However, parties who have received copies,
+or rights, from you under this License will not have their licenses
+terminated so long as such parties remain in full compliance.
+
+  9. You are not required to accept this License, since you have not
+signed it.  However, nothing else grants you permission to modify or
+distribute the Library or its derivative works.  These actions are
+prohibited by law if you do not accept this License.  Therefore, by
+modifying or distributing the Library (or any work based on the
+Library), you indicate your acceptance of this License to do so, and
+all its terms and conditions for copying, distributing or modifying
+the Library or works based on it.
+
+  10. Each time you redistribute the Library (or any work based on the
+Library), the recipient automatically receives a license from the
+original licensor to copy, distribute, link with or modify the Library
+subject to these terms and conditions.  You may not impose any further
+restrictions on the recipients' exercise of the rights granted herein.
+You are not responsible for enforcing compliance by third parties with
+this License.
+^L
+  11. If, as a consequence of a court judgment or allegation of patent
+infringement or for any other reason (not limited to patent issues),
+conditions are imposed on you (whether by court order, agreement or
+otherwise) that contradict the conditions of this License, they do not
+excuse you from the conditions of this License.  If you cannot
+distribute so as to satisfy simultaneously your obligations under this
+License and any other pertinent obligations, then as a consequence you
+may not distribute the Library at all.  For example, if a patent
+license would not permit royalty-free redistribution of the Library by
+all those who receive copies directly or indirectly through you, then
+the only way you could satisfy both it and this License would be to
+refrain entirely from distribution of the Library.
+
+If any portion of this section is held invalid or unenforceable under
+any particular circumstance, the balance of the section is intended to
+apply, and the section as a whole is intended to apply in other
+circumstances.
+
+It is not the purpose of this section to induce you to infringe any
+patents or other property right claims or to contest validity of any
+such claims; this section has the sole purpose of protecting the
+integrity of the free software distribution system which is
+implemented by public license practices.  Many people have made
+generous contributions to the wide range of software distributed
+through that system in reliance on consistent application of that
+system; it is up to the author/donor to decide if he or she is willing
+to distribute software through any other system and a licensee cannot
+impose that choice.
+
+This section is intended to make thoroughly clear what is believed to
+be a consequence of the rest of this License.
+
+  12. If the distribution and/or use of the Library is restricted in
+certain countries either by patents or by copyrighted interfaces, the
+original copyright holder who places the Library under this License
+may add an explicit geographical distribution limitation excluding those
+countries, so that distribution is permitted only in or among
+countries not thus excluded.  In such case, this License incorporates
+the limitation as if written in the body of this License.
+
+  13. The Free Software Foundation may publish revised and/or new
+versions of the Lesser General Public License from time to time.
+Such new versions will be similar in spirit to the present version,
+but may differ in detail to address new problems or concerns.
+
+Each version is given a distinguishing version number.  If the Library
+specifies a version number of this License which applies to it and
+"any later version", you have the option of following the terms and
+conditions either of that version or of any later version published by
+the Free Software Foundation.  If the Library does not specify a
+license version number, you may choose any version ever published by
+the Free Software Foundation.
+^L
+  14. If you wish to incorporate parts of the Library into other free
+programs whose distribution conditions are incompatible with these,
+write to the author to ask for permission.  For software which is
+copyrighted by the Free Software Foundation, write to the Free
+Software Foundation; we sometimes make exceptions for this.  Our
+decision will be guided by the two goals of preserving the free status
+of all derivatives of our free software and of promoting the sharing
+and reuse of software generally.
+
+                            NO WARRANTY
+
+  15. BECAUSE THE LIBRARY IS LICENSED FREE OF CHARGE, THERE IS NO
+WARRANTY FOR THE LIBRARY, TO THE EXTENT PERMITTED BY APPLICABLE LAW.
+EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR
+OTHER PARTIES PROVIDE THE LIBRARY "AS IS" WITHOUT WARRANTY OF ANY
+KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+PURPOSE.  THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE
+LIBRARY IS WITH YOU.  SHOULD THE LIBRARY PROVE DEFECTIVE, YOU ASSUME
+THE COST OF ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
+
+  16. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN
+WRITING WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY
+AND/OR REDISTRIBUTE THE LIBRARY AS PERMITTED ABOVE, BE LIABLE TO YOU
+FOR DAMAGES, INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR
+CONSEQUENTIAL DAMAGES ARISING OUT OF THE USE OR INABILITY TO USE THE
+LIBRARY (INCLUDING BUT NOT LIMITED TO LOSS OF DATA OR DATA BEING
+RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD PARTIES OR A
+FAILURE OF THE LIBRARY TO OPERATE WITH ANY OTHER SOFTWARE), EVEN IF
+SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH
+DAMAGES.
+
+                     END OF TERMS AND CONDITIONS
+^L
+           How to Apply These Terms to Your New Libraries
+
+  If you develop a new library, and you want it to be of the greatest
+possible use to the public, we recommend making it free software that
+everyone can redistribute and change.  You can do so by permitting
+redistribution under these terms (or, alternatively, under the terms
+of the ordinary General Public License).
+
+  To apply these terms, attach the following notices to the library.
+It is safest to attach them to the start of each source file to most
+effectively convey the exclusion of warranty; and each file should
+have at least the "copyright" line and a pointer to where the full
+notice is found.
+
+
+    <one line to give the library's name and a brief idea of what it
+does.>
+    Copyright (C) <year>  <name of author>
+
+    This library is free software; you can redistribute it and/or
+    modify it under the terms of the GNU Lesser General Public
+    License as published by the Free Software Foundation; either
+    version 2 of the License, or (at your option) any later version.
+
+    This library is distributed in the hope that it will be useful,
+    but WITHOUT ANY WARRANTY; without even the implied warranty of
+    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+    Lesser General Public License for more details.
+
+    You should have received a copy of the GNU Lesser General Public
+    License along with this library; if not, write to the Free Software
+    Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307  USA
+
+Also add information on how to contact you by electronic and paper
+mail.
+
+You should also get your employer (if you work as a programmer) or
+your
+school, if any, to sign a "copyright disclaimer" for the library, if
+necessary.  Here is a sample; alter the names:
+
+  Yoyodyne, Inc., hereby disclaims all copyright interest in the
+  library `Frob' (a library for tweaking knobs) written by James
+Random Hacker.
+
+  <signature of Ty Coon>, 1 April 1990
+  Ty Coon, President of Vice
+
+That's all there is to it!
+
+
diff -urN v2.4.19-pre5/ulib/Makefile linux.diff/ulib/Makefile
--- v2.4.19-pre5/ulib/Makefile	Wed Dec 31 19:00:00 1969
+++ linux.diff/ulib/Makefile	Fri Apr 19 20:58:01 2002
@@ -0,0 +1,50 @@
+#  Makefile - libredhat-kernel.so build code.
+#
+#    Copyright 2002 Red Hat, Inc.  All Rights Reserved.
+#
+#    This library is free software; you can redistribute it and/or
+#    modify it under the terms of the GNU Lesser General Public
+#    License as published by the Free Software Foundation; either
+#    version 2 of the License, or (at your option) any later version.
+#
+#    This library is distributed in the hope that it will be useful,
+#    but WITHOUT ANY WARRANTY; without even the implied warranty of
+#    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+#    Lesser General Public License for more details.
+#
+#    You should have received a copy of the GNU Lesser General Public
+#    License along with this library; if not, write to the Free Software
+#    Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307  USA
+#
+#
+all: libredhat-kernel.so
+
+ASFLAGS=-D__KERNEL__ -D__ASSEMBLY__ -I../include -nostdlib -nostartfiles
+CFLAGS=-D__KERNEL__ -I../include -nostdlib -nostartfiles
+
+so_objs=vsysaddr.o kso_init.o
+
+vsysaddr.S: ../System.map stub.S Makefile
+	rm -f vsysaddr.S
+	echo '#include "stub.S"' >vsysaddr.S
+	awk -- "/^00000000bfff.* vsys_/ { print \"dynamic_syscall(\"\$$3 \",0x\" \$$1 \")\"; }" <../System.map >>vsysaddr.S
+	awk -- "/^bfff.* vsys_/ { print \"dynamic_syscall(\"\$$3 \",0x\" \$$1 \")\"; }" <../System.map >>vsysaddr.S
+
+vsysaddr.o: vsysaddr.S
+
+kso_init.o: ../include/linux/compile.h
+
+libredhat-kernel.so.1.0.1: $(so_objs) libredhat-kernel.map
+	gcc -nostdlib -nostartfiles -shared -Wl,--version-script=libredhat-kernel.map -Wl,-soname=libredhat-kernel.so.1 -o $@  $(so_objs)
+	cp $@ $@.save
+	strip $@
+
+libredhat-kernel.so: libredhat-kernel.so.1.0.1
+	ln -sf $< $@
+
+clean:
+	rm -f *.o libredhat-kernel.so myln libredhat-kernel.so.1* vsysaddr.S
+
+# test app
+myln: myln.c libredhat-kernel.so Makefile
+	cc -g -o myln myln.c -L. -lredhat-kernel
diff -urN v2.4.19-pre5/ulib/README linux.diff/ulib/README
--- v2.4.19-pre5/ulib/README	Wed Dec 31 19:00:00 1969
+++ linux.diff/ulib/README	Fri Apr 19 20:54:05 2002
@@ -0,0 +1,2 @@
+The libredhat-kernel code is provided under the terms of the LGPL.  
+See the file COPYING for details.
diff -urN v2.4.19-pre5/ulib/kso_init.c linux.diff/ulib/kso_init.c
--- v2.4.19-pre5/ulib/kso_init.c	Wed Dec 31 19:00:00 1969
+++ linux.diff/ulib/kso_init.c	Fri Apr 19 20:54:05 2002
@@ -0,0 +1,67 @@
+/* kso_init.c - libredhat-kernel.so startup code.
+
+    Copyright 2002 Red Hat, Inc.  All Rights Reserved.
+
+    This library is free software; you can redistribute it and/or
+    modify it under the terms of the GNU Lesser General Public
+    License as published by the Free Software Foundation; either
+    version 2 of the License, or (at your option) any later version.
+
+    This library is distributed in the hope that it will be useful,
+    but WITHOUT ANY WARRANTY; without even the implied warranty of
+    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+    Lesser General Public License for more details.
+
+    You should have received a copy of the GNU Lesser General Public
+    License along with this library; if not, write to the Free Software
+    Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307  USA
+
+ */
+#include <linux/compile.h>
+#include <linux/types.h>
+#include <asm/unistd.h>
+#include <asm/fcntl.h>
+#include <asm/mman.h>
+#include <asm/a.out.h>
+
+char libredhat_kernel_enosys = 1;	/* the asm in stub.S depends on this */
+
+long _init(void)
+{
+	static char unique[] = { LINUX_UNIQUE_BYTES };
+	int errno;
+	long addr;
+	int fd;
+	int i;
+
+	_syscall6(int, mmap2, unsigned long, addr, unsigned long, len,
+        	  unsigned long, prot, unsigned long, flags,
+        	  unsigned long, fd, unsigned long, pgoff)
+	_syscall2(long, munmap, unsigned long, addr, size_t, len)
+	_syscall2(int, open, const char *, name, int, flags)
+	_syscall1(int, close, int, fd)
+
+	if (sizeof(unique) != 16)
+		return -1;
+
+	fd = open("/dev/vsys", O_RDONLY);
+	if (-1 == fd)
+		return -1;
+
+	addr = mmap2(0, VSYSCALL_SIZE, PROT_READ | PROT_EXEC, MAP_SHARED, fd, 0);
+	if (-1 == addr)
+		return -1;
+
+	close(fd);
+
+	for (i=0; i<sizeof(unique); i++)
+		if (unique[i] != ((char *)addr)[i]) {
+			munmap(addr, VSYSCALL_SIZE);
+			return -1;
+		}
+
+	/* okay, all the syscalls we provide are now good */
+	libredhat_kernel_enosys = 0;
+	return 0;
+}
+
diff -urN v2.4.19-pre5/ulib/libredhat-kernel.map linux.diff/ulib/libredhat-kernel.map
--- v2.4.19-pre5/ulib/libredhat-kernel.map	Wed Dec 31 19:00:00 1969
+++ linux.diff/ulib/libredhat-kernel.map	Tue Apr  2 18:56:58 2002
@@ -0,0 +1,11 @@
+REDHAT_0.90 {	/* version node for libredhat-kernel.so */
+	global:	/* only the vsyscall entry stubs are exported */
+		vsys_io_setup;
+		vsys_io_destroy;
+		vsys_io_submit;
+		vsys_io_cancel;
+		vsys_io_wait;
+		vsys_io_getevents;
+	local:	/* every other symbol is hidden from link/dynamic scope */
+		*;
+};
diff -urN v2.4.19-pre5/ulib/myln.c linux.diff/ulib/myln.c
--- v2.4.19-pre5/ulib/myln.c	Wed Dec 31 19:00:00 1969
+++ linux.diff/ulib/myln.c	Tue Apr  2 18:56:58 2002
@@ -0,0 +1,26 @@
+#include <stdio.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <fcntl.h>
+#include <unistd.h>
+#include <sys/mman.h>
+
+/* quick manual test: call the vsys_io_setup stub and print the result */
+int main ()
+{
+	long ctx = 0;
+	extern long vsys_io_setup(long, long *);
+	unsigned char *bob = (void*)&vsys_io_setup;	/* address of the stub, for inspection */
+	long ret;
+	int i;	/* used only by the commented-out byte-dump loop below */
+	printf("%p\n", bob);
+	//printf("%p\n", mmap(0, 65536, PROT_READ | PROT_EXEC, MAP_SHARED,
+	//	open("/dev/vsys", O_RDONLY), 0));
+	//for (i=0; i<16; i++)
+	//	printf(" %02x\n", bob[i]);
+	//printf("\n");
+
+	ret = vsys_io_setup(100, &ctx);
+
+	printf("ret=%ld, ctx=0x%lx\n", ret, ctx);
+	return 0;
+}
diff -urN v2.4.19-pre5/ulib/stub.S linux.diff/ulib/stub.S
--- v2.4.19-pre5/ulib/stub.S	Wed Dec 31 19:00:00 1969
+++ linux.diff/ulib/stub.S	Fri Apr 19 20:54:05 2002
@@ -0,0 +1,38 @@
+/* stub.S - libredhat-kernel.so jump code.
+
+    Copyright 2002 Red Hat, Inc.  All Rights Reserved.
+
+    This library is free software; you can redistribute it and/or
+    modify it under the terms of the GNU Lesser General Public
+    License as published by the Free Software Foundation; either
+    version 2 of the License, or (at your option) any later version.
+
+    This library is distributed in the hope that it will be useful,
+    but WITHOUT ANY WARRANTY; without even the implied warranty of
+    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+    Lesser General Public License for more details.
+
+    You should have received a copy of the GNU Lesser General Public
+    License along with this library; if not, write to the Free Software
+    Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307  USA
+
+ */
+/* stub.S */
+#include <asm/segment.h>
+#include <asm/errno.h>
+
+	.text
+
+#define dynamic_syscall(x,a) /* emit stub x that far-jumps to fixed vsyscall address a */ \
+	.globl	x				;\
+	.type	x, @function			;\
+	.align 16				;\
+	x:					;\
+		cmpb $0,libredhat_kernel_enosys	/* flag cleared by _init on a matching kernel */	;\
+		jne 1f				;\
+		ljmp $__USER_CS, $a		/* jump into the mapped vsyscall page */	;\
+	1:					;\
+		movl	$-ENOSYS,%eax		/* fallback: pretend the syscall is missing */	;\
+		ret				;\
+	.size	 x,.-x
+
