diff -urN 2.2.17pre3/fs/buffer.c 2.2.17pre3-VM/fs/buffer.c
--- 2.2.17pre3/fs/buffer.c	Tue Jun 13 03:48:14 2000
+++ 2.2.17pre3-VM/fs/buffer.c	Sat Jun 17 02:40:47 2000
@@ -1464,6 +1464,25 @@
 #define BUFFER_BUSY_BITS	((1<<BH_Dirty) | (1<<BH_Lock) | (1<<BH_Protected))
 #define buffer_busy(bh)		((bh)->b_count || ((bh)->b_state & BUFFER_BUSY_BITS))
 
+static inline int sync_page_buffers(struct buffer_head * bh)
+{
+	struct buffer_head * tmp = bh;
+
+	do {
+		if (buffer_dirty(tmp) && !buffer_locked(tmp))
+			ll_rw_block(WRITE, 1, &tmp);
+		tmp = tmp->b_this_page;
+	} while (tmp != bh);
+
+	do {
+		if (buffer_busy(tmp))
+			return 1;
+		tmp = tmp->b_this_page;
+	} while (tmp != bh);
+
+	return 0;
+}
+
 /*
  * try_to_free_buffers() checks if all the buffers on this particular page
  * are unused, and free's the page if so.
@@ -1477,16 +1496,12 @@
 
 	tmp = bh;
 	do {
-		struct buffer_head * p = tmp;
-
+		if (buffer_busy(tmp))
+			goto busy;
 		tmp = tmp->b_this_page;
-		if (!buffer_busy(p))
-			continue;
-
-		wakeup_bdflush(0);
-		return 0;
 	} while (tmp != bh);
 
+ succeed:
 	tmp = bh;
 	do {
 		struct buffer_head * p = tmp;
@@ -1504,6 +1519,17 @@
 	page_map->buffers = NULL;
 	__free_page(page_map);
 	return 1;
+
+ busy:
+	if (!sync_page_buffers(bh))
+		/*
+		 * We can jump back to after the busy check because
+		 * we rely on the kernel lock to keep the buffer
+		 * state stable in between.
+		 */
+		goto succeed;
+
+	wakeup_bdflush(0);
+	return 0;
 }
 
 /* ================== Debugging =================== */
diff -urN 2.2.17pre3/include/linux/sched.h 2.2.17pre3-VM/include/linux/sched.h
--- 2.2.17pre3/include/linux/sched.h	Wed May 10 22:26:45 2000
+++ 2.2.17pre3-VM/include/linux/sched.h	Sat Jun 17 02:40:47 2000
@@ -291,7 +291,6 @@
 /* mm fault and swap info: this can arguably be seen as either mm-specific or thread-specific */
 	unsigned long min_flt, maj_flt, nswap, cmin_flt, cmaj_flt, cnswap;
 	int swappable:1;
-	int trashing_mem:1;
 /* process credentials */
 	uid_t uid,euid,suid,fsuid;
 	gid_t gid,egid,sgid,fsgid;
@@ -382,7 +381,7 @@
 /* utime */	{0,0,0,0},0, \
 /* per CPU times */ {0, }, {0, }, \
 /* flt */	0,0,0,0,0,0, \
-/* swp */	0,0, \
+/* swp */	0, \
 /* process credentials */					\
 /* uid etc */	0,0,0,0,0,0,0,0,				\
 /* suppl grps*/ 0, {0,},					\
diff -urN 2.2.17pre3/mm/filemap.c 2.2.17pre3-VM/mm/filemap.c
--- 2.2.17pre3/mm/filemap.c	Tue Jun 13 03:48:15 2000
+++ 2.2.17pre3-VM/mm/filemap.c	Sat Jun 17 02:40:47 2000
@@ -145,6 +145,7 @@
 	/* Make sure we scan all pages twice at priority 0. */
 	count = (limit << 1) >> priority;
 
+ refresh_clock:
 	page = mem_map + clock;
 	do {
 		int referenced;
@@ -198,8 +199,12 @@
 		if (page->buffers) {
 			if (buffer_under_min())
 				continue;
+			/*
+			 * We can sleep if we need to do some write
+			 * throttling.
+			 */
 			if (!try_to_free_buffers(page))
-				continue;
+				goto refresh_clock;
 			return 1;
 		}
 
diff -urN 2.2.17pre3/mm/page_alloc.c 2.2.17pre3-VM/mm/page_alloc.c
--- 2.2.17pre3/mm/page_alloc.c	Sat Jun 17 02:38:58 2000
+++ 2.2.17pre3-VM/mm/page_alloc.c	Sat Jun 17 02:40:47 2000
@@ -182,7 +182,6 @@
 unsigned long __get_free_pages(int gfp_mask, unsigned long order)
 {
 	unsigned long flags;
-	static unsigned long last_woke_kswapd = 0;
 	static atomic_t free_before_allocate = ATOMIC_INIT(0);
 
 	if (order >= NR_MEM_LISTS)
@@ -208,41 +207,41 @@
 		int freed;
 		extern struct wait_queue * kswapd_wait;
 
-		if (nr_free_pages > freepages.high)
-			goto ok_to_allocate;
-		
-		/* Maybe wake up kswapd for background swapping. */
-		if (time_before(last_woke_kswapd + HZ, jiffies)) {
-			last_woke_kswapd = jiffies;
-			wake_up_interruptible(&kswapd_wait);
-		}
-
 		/* Somebody needs to free pages so we free some of our own. */
 		if (atomic_read(&free_before_allocate)) {
 			current->flags |= PF_MEMALLOC;
-			freed = try_to_free_pages(gfp_mask);
+			try_to_free_pages(gfp_mask);
 			current->flags &= ~PF_MEMALLOC;
-			if (freed)
-				goto ok_to_allocate;
 		}
 
-		/* Do we have to help kswapd or can we proceed? */
-		if (nr_free_pages < (freepages.min + freepages.low) / 2) {
+		if (nr_free_pages > freepages.low)
+			goto ok_to_allocate;
+
+		if (waitqueue_active(&kswapd_wait))
 			wake_up_interruptible(&kswapd_wait);
 
-			/* Help kswapd a bit... */
-			current->flags |= PF_MEMALLOC;
-			atomic_inc(&free_before_allocate);
-			freed = try_to_free_pages(gfp_mask);
-			atomic_dec(&free_before_allocate);
-			current->flags &= ~PF_MEMALLOC;
+		/* Do we have to block or can we proceed? */
+		if (nr_free_pages > freepages.min)
+			goto ok_to_allocate;
 
-			if (nr_free_pages > freepages.min)
-				goto ok_to_allocate;
+		current->flags |= PF_MEMALLOC;
+		atomic_inc(&free_before_allocate);
+		freed = try_to_free_pages(gfp_mask);
+		atomic_dec(&free_before_allocate);
+		current->flags &= ~PF_MEMALLOC;
+
+		/*
+		 * Re-check we're still low on memory after we blocked
+		 * for some time. Somebody may have released lots of
+	 * memory from under us while we were trying to free
+	 * the pages. We check against freepages.high to be sure
+	 * to succeed only if lots of memory has been released.
+		 */
+		if (nr_free_pages > freepages.high)
+			goto ok_to_allocate;
 
-			if (!freed && !(gfp_mask & (__GFP_MED | __GFP_HIGH)))
-				goto nopage;
-		}
+		if (!freed && !(gfp_mask & (__GFP_MED | __GFP_HIGH)))
+			goto nopage;
 	}
 ok_to_allocate:
 	spin_lock_irqsave(&page_alloc_lock, flags);
@@ -311,8 +310,8 @@
 	 * analysis.
 	 */
 	i = (end_mem - PAGE_OFFSET) >> (PAGE_SHIFT+7);
-	if (i < 10)
-		i = 10;
+	if (i < 50)
+		i = 50;
 	if (i > 256)
 		i = 256;
 	freepages.min = i;
diff -urN 2.2.17pre3/mm/vmscan.c 2.2.17pre3-VM/mm/vmscan.c
--- 2.2.17pre3/mm/vmscan.c	Tue Jun 13 03:48:15 2000
+++ 2.2.17pre3-VM/mm/vmscan.c	Sat Jun 17 02:40:47 2000
@@ -333,7 +333,6 @@
 
 	for (; counter >= 0; counter--) {
 		max_cnt = 0;
-		assign = 0;
 		pbest = NULL;
 	select:
 		read_lock(&tasklist_lock);
@@ -378,17 +377,10 @@
  * cluster them so that we get good swap-out behaviour. See
  * the "free_memory()" macro for details.
  */
-#define FLUSH_COUNT	8
 static int do_try_to_free_pages(unsigned int gfp_mask)
 {
-	int priority, count, swapcount;
-	int flushcount = FLUSH_COUNT;
-	int ret = 0;
-
-	/* Kswapd does nothing but freeing pages so we can do big bites. */
-	if (gfp_mask == GFP_KSWAPD)
-		flushcount = SWAP_CLUSTER_MAX;
-	count = flushcount;
+	int priority;
+	int count = SWAP_CLUSTER_MAX;
 
 	lock_kernel();
 
@@ -398,7 +390,6 @@
 	priority = 6;
 	do {
 		while (shrink_mmap(priority, gfp_mask)) {
-			ret = 1;
 			if (!--count)
 				goto done;
 		}
@@ -406,36 +397,27 @@
 		/* Try to get rid of some shared memory pages.. */
 		if (gfp_mask & __GFP_IO) {
 			while (shm_swap(priority, gfp_mask)) {
-				ret = 1;
 				if (!--count)
 					goto done;
 			}
 		}
 
 		/* Then, try to page stuff out.. */
-		swapcount = flushcount;
 		while (swap_out(priority, gfp_mask)) {
-			if (!--swapcount)
-				break;
+			if (!--count)
+				goto done;
 		}
 
 		shrink_dcache_memory(priority, gfp_mask);
 	} while (--priority >= 0);
-
-	/* End with a shrink_mmap() to make sure we free something. */
-	while (shrink_mmap(0, gfp_mask)) {
-		ret = 1;
-		if (!--count)
-			goto done;
-	}
 done:
 	unlock_kernel();
 
-	if (!ret)
+	if (priority < 0)
 		printk("VM: do_try_to_free_pages failed for %s...\n",
 				current->comm);
 	/* Return success if we freed a page. */
-	return ret;
+	return priority >= 0;
 }
 
 /*
@@ -507,18 +489,11 @@
 		 * the processes needing more memory will wake us
 		 * up on a more timely basis.
 		 */
-		int failed = 0;
-sleep:
 		interruptible_sleep_on(&kswapd_wait);
-		/* Enough free pages? -> call do_try_to_free_pages only once. */
-		if (nr_free_pages > freepages.low) {
-			do_try_to_free_pages(GFP_KSWAPD);
-			goto sleep;
-		}
-		/* Not enough free pages? -> free pages agressively. */
+
 		while (nr_free_pages < freepages.high)
 		{
-			if (do_try_to_free_pages(GFP_KSWAPD) && failed++ < 10)
+			if (do_try_to_free_pages(GFP_KSWAPD))
 			{
 				if (tsk->need_resched)
 					schedule();