diff -ur v2.4.1/mm/filemap.c work/mm/filemap.c
--- v2.4.1/mm/filemap.c	Tue Jan 30 12:08:49 2001
+++ work/mm/filemap.c	Wed Jan 31 13:58:29 2001
@@ -2023,9 +2023,7 @@
 	if (vma->vm_flags & VM_LOCKED)
 		return -EINVAL;
 
-	flush_cache_range(vma->vm_mm, start, end);
 	zap_page_range(vma->vm_mm, start, end - start);
-	flush_tlb_range(vma->vm_mm, start, end);
 	return 0;
 }
 
diff -ur v2.4.1/mm/memory.c work/mm/memory.c
--- v2.4.1/mm/memory.c	Tue Jan 30 12:08:49 2001
+++ work/mm/memory.c	Wed Jan 31 13:57:34 2001
@@ -262,40 +262,71 @@
 	return -ENOMEM;
 }
 
+#define FREE_PTE_NR	510
+
+struct free_pte_ctx {
+	int		freed, nr;
+	unsigned long	start_addr, end_addr;
+	pte_t	ptes[FREE_PTE_NR];
+};
+
+#define INIT_FREE_PTE_CTX(X, addr) do {\
+		(X)->nr = 0;\
+		(X)->freed = 0;\
+		(X)->start_addr = (X)->end_addr = (addr);\
+	} while (0)
+
 /*
  * Return indicates whether a page was freed so caller can adjust rss
  */
-static inline int free_pte(pte_t pte)
+#define free_pte(mm, ctxp, pte, addr) do {\
+		(ctxp)->ptes[(ctxp)->nr++] = (pte);\
+		(ctxp)->end_addr = (addr) + PAGE_SIZE;\
+		if ((ctxp)->nr >= FREE_PTE_NR)\
+			free_ptes(mm, (ctxp));\
+	} while (0)
+
+static void free_ptes(struct mm_struct *mm, struct free_pte_ctx *ctx)
 {
-	if (pte_present(pte)) {
-		struct page *page = pte_page(pte);
-		if ((!VALID_PAGE(page)) || PageReserved(page))
-			return 0;
-		/* 
-		 * free_page() used to be able to clear swap cache
-		 * entries.  We may now have to do it manually.  
-		 */
-		if (pte_dirty(pte) && page->mapping)
-			set_page_dirty(page);
-		free_page_and_swap_cache(page);
-		return 1;
+	int i, nr;
+
+	flush_tlb_range(mm, ctx->start_addr, ctx->end_addr);
+	nr = ctx->nr;
+	ctx->nr = 0;
+	for (i=0; i < nr; i++) {
+		pte_t pte = ctx->ptes[i];
+
+		if (pte_present(pte)) {
+			struct page *page = pte_page(pte);
+			if ((!VALID_PAGE(page)) || PageReserved(page))
+				continue;
+			/* 
+			 * free_page() used to be able to clear swap cache
+			 * entries.  We may now have to do it manually.  
+			 */
+			if (pte_dirty(pte) && page->mapping)
+				set_page_dirty(page);
+			free_page_and_swap_cache(page);
+			ctx->freed++;
+		} else
+			swap_free(pte_to_swp_entry(pte));
 	}
-	swap_free(pte_to_swp_entry(pte));
-	return 0;
+	ctx->start_addr = ctx->end_addr;
 }
 
 static inline void forget_pte(pte_t page)
 {
 	if (!pte_none(page)) {
 		printk("forget_pte: old mapping existed!\n");
-		free_pte(page);
+		BUG();
 	}
 }
 
 static inline int zap_pte_range(struct mm_struct *mm, pmd_t * pmd, unsigned long address, unsigned long size)
 {
+	struct free_pte_ctx free_ctx;
+	unsigned long offset;
 	pte_t * pte;
-	int freed;
 
 	if (pmd_none(*pmd))
 		return 0;
@@ -308,20 +339,18 @@
-	address &= ~PMD_MASK;
-	if (address + size > PMD_SIZE)
-		size = PMD_SIZE - address;
-	size >>= PAGE_SHIFT;
-	freed = 0;
-	for (;;) {
-		pte_t page;
-		if (!size)
-			break;
-		page = ptep_get_and_clear(pte);
-		pte++;
-		size--;
+	offset = address & ~PMD_MASK;
+	if (offset + size > PMD_SIZE)
+		size = PMD_SIZE - offset;
+	INIT_FREE_PTE_CTX(&free_ctx, address);
+	size &= PAGE_MASK;
+	for (offset=0; offset < size; pte++, offset += PAGE_SIZE) {
+		pte_t page = ptep_get_and_clear(pte);
 		if (pte_none(page))
 			continue;
-		freed += free_pte(page);
+		free_pte(mm, &free_ctx, page, address + offset);
 	}
-	return freed;
+	/* this will flush any remaining tlb entries */
+	free_ptes(mm, &free_ctx);
+
+	return free_ctx.freed;
 }
 
 static inline int zap_pmd_range(struct mm_struct *mm, pgd_t * dir, unsigned long address, unsigned long size)
@@ -360,6 +389,8 @@
 	unsigned long end = address + size;
 	int freed = 0;
 
+	flush_cache_range(mm, address, end);
+
 	dir = pgd_offset(mm, address);
 
 	/*
diff -ur v2.4.1/mm/mmap.c work/mm/mmap.c
--- v2.4.1/mm/mmap.c	Tue Jan 30 12:08:49 2001
+++ work/mm/mmap.c	Wed Jan 31 13:57:44 2001
@@ -363,9 +363,7 @@
 	vma->vm_file = NULL;
 	fput(file);
 	/* Undo any partial mapping done by a device driver. */
-	flush_cache_range(mm, vma->vm_start, vma->vm_end);
 	zap_page_range(mm, vma->vm_start, vma->vm_end - vma->vm_start);
-	flush_tlb_range(mm, vma->vm_start, vma->vm_end);
 free_vma:
 	kmem_cache_free(vm_area_cachep, vma);
 	return error;
@@ -740,9 +738,7 @@
 		remove_shared_vm_struct(mpnt);
 		mm->map_count--;
 
-		flush_cache_range(mm, st, end);
 		zap_page_range(mm, st, size);
-		flush_tlb_range(mm, st, end);
 
 		/*
 		 * Fix the mapping, and free the old area if it wasn't reused.
diff -ur v2.4.1/mm/mremap.c work/mm/mremap.c
--- v2.4.1/mm/mremap.c	Fri Dec 29 17:07:24 2000
+++ work/mm/mremap.c	Wed Jan 31 13:56:23 2001
@@ -119,7 +119,6 @@
 	while ((offset += PAGE_SIZE) < len)
 		move_one_page(mm, new_addr + offset, old_addr + offset);
 	zap_page_range(mm, new_addr, len);
-	flush_tlb_range(mm, new_addr, new_addr + len);
 	return -1;
 }
 
