 fs/reiser4/flush_queue.c                  |   20 ++++-------
 fs/reiser4/lock.c                         |   25 ++++----------
 fs/reiser4/plugin/file_ops_readdir.c      |    5 --
 fs/reiser4/plugin/item/extent_flush_ops.c |    4 --
 fs/reiser4/txnmgr.c                       |   52 +++++++-----------------------
 fs/reiser4/wander.c                       |   10 +----
 6 files changed, 32 insertions(+), 84 deletions(-)

diff -puN fs/reiser4/flush_queue.c~reiser4-use-for_each_list_entry fs/reiser4/flush_queue.c
--- linux-2.6.14-rc1-mm1/fs/reiser4/flush_queue.c~reiser4-use-for_each_list_entry	2005-09-19 13:20:33.000000000 +0400
+++ linux-2.6.14-rc1-mm1-vs/fs/reiser4/flush_queue.c	2005-09-19 13:20:33.000000000 +0400
@@ -282,15 +282,13 @@ static int finish_fq(flush_queue_t * fq,
 static int finish_all_fq(txn_atom * atom, int *nr_io_errors)
 {
 	flush_queue_t *fq;
-	struct list_head *pos;
 
 	assert("zam-730", spin_atom_is_locked(atom));
 
 	if (list_empty_careful(&atom->flush_queues))
 		return 0;
 
-	list_for_each(pos, &atom->flush_queues) {
-		fq = list_entry(pos, flush_queue_t, alink);
+	list_for_each_entry(fq, &atom->flush_queues, alink) {
 		if (fq_ready(fq)) {
 			int ret;
 
@@ -356,10 +354,8 @@ static void
 scan_fq_and_update_atom_ref(struct list_head *list, txn_atom *atom)
 {
 	jnode *cur;
-	struct list_head *pos;
 
-	list_for_each(pos, list) {
-		cur = list_entry(pos, jnode, capture_link);
+	list_for_each_entry(cur, list, capture_link) {
 		LOCK_JNODE(cur);
 		cur->atom = atom;
 		UNLOCK_JNODE(cur);
@@ -370,13 +366,11 @@ scan_fq_and_update_atom_ref(struct list_
 void fuse_fq(txn_atom *to, txn_atom *from)
 {
 	flush_queue_t *fq;
-	struct list_head *pos;
 
 	assert("zam-720", spin_atom_is_locked(to));
 	assert("zam-721", spin_atom_is_locked(from));
 
-	list_for_each(pos, &from->flush_queues) {
-		fq = list_entry(pos, flush_queue_t, alink);
+	list_for_each_entry(fq, &from->flush_queues, alink) {
 		scan_fq_and_update_atom_ref(ATOM_FQ_LIST(fq), to);
 		spin_lock_fq(fq);
 		fq->atom = to;
@@ -723,13 +717,13 @@ void check_fq(const txn_atom *atom)
 	/* check number of nodes on all atom's flush queues */
 	flush_queue_t *fq;
 	int count;
-	struct list_head *pos1, *pos2;
+	struct list_head *pos;
 
 	count = 0;
-	list_for_each(pos1, &atom->flush_queues) {
-		fq = list_entry(pos1, flush_queue_t, alink);
+	list_for_each_entry(fq, &atom->flush_queues, alink) {
 		spin_lock_fq(fq);
-		list_for_each(pos2, ATOM_FQ_LIST(fq))
+		/* calculate number of jnodes on fq's list of prepped jnodes */
+		list_for_each(pos, ATOM_FQ_LIST(fq))
 			count++;
 		spin_unlock_fq(fq);
 	}
diff -puN fs/reiser4/lock.c~reiser4-use-for_each_list_entry fs/reiser4/lock.c
--- linux-2.6.14-rc1-mm1/fs/reiser4/lock.c~reiser4-use-for_each_list_entry	2005-09-19 13:20:33.000000000 +0400
+++ linux-2.6.14-rc1-mm1-vs/fs/reiser4/lock.c	2005-09-19 14:14:51.000000000 +0400
@@ -230,11 +230,9 @@ lock_stack *get_current_lock_stack(void)
 static void wake_up_all_lopri_owners(znode * node)
 {
 	lock_handle *handle;
-	struct list_head *pos;
 
 	assert("nikita-1824", rw_zlock_is_locked(&node->lock));
-	list_for_each(pos, &node->lock.owners) {
-		handle = list_entry(pos, lock_handle, owners_link);
+	list_for_each_entry(handle, &node->lock.owners, owners_link) {
 		spin_lock_stack(handle->owner);
 
 		assert("nikita-1832", handle->node == node);
@@ -360,7 +358,6 @@ int znode_is_any_locked(const znode * no
 	lock_handle *handle;
 	lock_stack *stack;
 	int ret;
-	struct list_head *pos;
 
 	if (!znode_is_locked(node)) {
 		return 0;
@@ -372,8 +369,7 @@ int znode_is_any_locked(const znode * no
 
 	ret = 0;
 
-	list_for_each(pos, &stack->locks) {
-		handle = list_entry(pos, lock_handle, locks_link);
+	list_for_each_entry(handle, &stack->locks, locks_link) {
 		if (handle->node == node) {
 			ret = 1;
 			break;
@@ -1072,7 +1068,6 @@ void invalidate_lock(lock_handle * handl
 	znode *node = handle->node;
 	lock_stack *owner = handle->owner;
 	lock_stack *rq;
-	struct list_head *pos;
 
 	assert("zam-325", owner == get_current_lock_stack());
 	assert("zam-103", znode_is_write_locked(node));
@@ -1090,10 +1085,8 @@ void invalidate_lock(lock_handle * handl
 	node->lock.nr_readers = 0;
 
 	/* all requestors will be informed that lock is invalidated. */
-	list_for_each(pos, &node->lock.requestors) {
-		rq = list_entry(pos, lock_stack, requestors_link);
+	list_for_each_entry(rq, &node->lock.requestors, requestors_link)
 		reiser4_wake_up(rq);
-	}
 
 	/* We use that each unlock() will wakeup first item from requestors
 	   list; our lock stack is the last one. */
@@ -1279,7 +1272,6 @@ int lock_stack_isclean(lock_stack * owne
 void print_lock_stack(const char *prefix, lock_stack * owner)
 {
 	lock_handle *handle;
-	struct list_head *pos;
 
 	spin_lock_stack(owner);
 
@@ -1296,8 +1288,7 @@ void print_lock_stack(const char *prefix
 
 	printk(".... current locks:\n");
 
-	list_for_each(pos, &owner->locks) {
-		handle = list_entry(pos, lock_handle, locks_link);
+	list_for_each_entry(handle, &owner->locks, locks_link) {
 		if (handle->node != NULL)
 			print_address(znode_is_rlocked(handle->node) ?
 				      "......  read" : "...... write",
@@ -1311,11 +1302,11 @@ void print_lock_stack(const char *prefix
  * debugging functions
  */
 
-void list_check(struct list_head *head)
+static void list_check(struct list_head *head)
 {
 	struct list_head *pos;
 
-	list_for_each (pos, head)
+	list_for_each(pos, head)
 		assert("", (pos->prev != NULL && pos->next != NULL &&
 			    pos->prev->next == pos && pos->next->prev == pos));
 }
@@ -1351,7 +1342,6 @@ request_is_deadlock_safe(znode * node, z
 			 znode_lock_request request)
 {
 	lock_stack *owner;
-	struct list_head *pos;
 
 	owner = get_current_lock_stack();
 	/*
@@ -1362,10 +1352,9 @@ request_is_deadlock_safe(znode * node, z
 	    znode_get_level(node) != 0) {
 		lock_handle *item;
 
-		list_for_each(pos, &owner->locks) {
+		list_for_each_entry(item, &owner->locks, locks_link) {
 			znode *other;
 
-			item = list_entry(pos, lock_handle, locks_link);
 			other = item->node;
 
 			if (znode_get_level(other) == 0)
diff -puN fs/reiser4/txnmgr.c~reiser4-use-for_each_list_entry fs/reiser4/txnmgr.c
--- linux-2.6.14-rc1-mm1/fs/reiser4/txnmgr.c~reiser4-use-for_each_list_entry	2005-09-19 13:20:33.000000000 +0400
+++ linux-2.6.14-rc1-mm1-vs/fs/reiser4/txnmgr.c	2005-09-19 13:20:33.000000000 +0400
@@ -883,10 +883,8 @@ static int atom_should_commit_asap(const
 static jnode *find_first_dirty_in_list(struct list_head *head, int flags)
 {
 	jnode *first_dirty;
-	struct list_head *pos;
 
-	list_for_each(pos, head) {
-		first_dirty = list_entry(pos, jnode, capture_link);
+	list_for_each_entry(first_dirty, head, capture_link) {
 		if (!(flags & JNODE_FLUSH_COMMIT)) {
 			/*
 			 * skip jnodes which "heard banshee" or having active
@@ -1227,7 +1225,6 @@ int txnmgr_force_commit_all(struct super
 	txn_handle *txnh;
 	unsigned long start_time = jiffies;
 	reiser4_context *ctx = get_current_context();
-	struct list_head *pos;
 
 	assert("nikita-2965", lock_stack_isclean(get_current_lock_stack()));
 	assert("nikita-3058", commit_check_locks());
@@ -1242,8 +1239,7 @@ int txnmgr_force_commit_all(struct super
 
 	spin_lock_txnmgr(mgr);
 
-	list_for_each(pos, &mgr->atoms_list) {
-		atom = list_entry(pos, txn_atom, atom_link);
+	list_for_each_entry(atom, &mgr->atoms_list, atom_link) {
 		LOCK_ATOM(atom);
 
 		/* Commit any atom which can be committed.  If @commit_new_atoms
@@ -1369,7 +1365,6 @@ static int txn_try_to_fuse_small_atom(tx
 	int atom_stage;
 	txn_atom *atom_2;
 	int repeat;
-	struct list_head *pos;
 
 	assert("zam-1051", atom->stage < ASTAGE_PRE_COMMIT);
 
@@ -1385,8 +1380,7 @@ static int txn_try_to_fuse_small_atom(tx
 			goto out;
 	}
 
-	list_for_each(pos, &tmgr->atoms_list) {
-		atom_2 = list_entry(pos, txn_atom, atom_link);
+	list_for_each_entry(atom_2, &tmgr->atoms_list, atom_link) {
 		if (atom == atom_2)
 			continue;
 		/*
@@ -1435,7 +1429,6 @@ flush_some_atom(jnode * start, long *nr_
 	txn_handle *txnh = ctx->trans;
 	txn_atom *atom;
 	int ret;
-	struct list_head *pos;
 
 	BUG_ON(wbc->nr_to_write == 0);
 	BUG_ON(*nr_submitted != 0);
@@ -1446,8 +1439,7 @@ flush_some_atom(jnode * start, long *nr_
 		spin_lock_txnmgr(tmgr);
 
 		/* traverse the list of all atoms */
-		list_for_each(pos, &tmgr->atoms_list) {
-			atom = list_entry(pos, txn_atom, atom_link);
+		list_for_each_entry(atom, &tmgr->atoms_list, atom_link) {
 			/* lock atom before checking its state */
 			LOCK_ATOM(atom);
 
@@ -1473,8 +1465,7 @@ flush_some_atom(jnode * start, long *nr_
 		 * flushed/committed.
 		 */
 		if (!current_is_pdflush() && !wbc->nonblocking) {
-			list_for_each(pos, &tmgr->atoms_list) {
-				atom = list_entry(pos, txn_atom, atom_link);
+			list_for_each_entry(atom, &tmgr->atoms_list, atom_link) {
 				LOCK_ATOM(atom);
 				/* Repeat the check from the above. */
 				if (atom->stage < ASTAGE_PRE_COMMIT
@@ -2208,7 +2199,6 @@ static int fuse_not_fused_lock_owners(tx
 	lock_handle *lh;
 	int repeat = 0;
 	txn_atom *atomh = txnh->atom;
-	struct list_head *pos;
 
 /*	assert ("zam-689", znode_is_rlocked (node));*/
 	assert("zam-690", spin_znode_is_locked(node));
@@ -2223,11 +2213,10 @@ static int fuse_not_fused_lock_owners(tx
 	}
 
 	/* inspect list of lock owners */
-	list_for_each(pos, &node->lock.owners) {
+	list_for_each_entry(lh, &node->lock.owners, owners_link) {
 		reiser4_context *ctx;
 		txn_atom *atomf;
 
-		lh = list_entry(pos, lock_handle, owners_link);
 		ctx = get_context_by_lock_stack(lh->owner);
 
 		if (ctx == get_current_context())
@@ -3110,13 +3099,11 @@ int capture_super_block(struct super_blo
 static void wakeup_atom_waitfor_list(txn_atom * atom)
 {
 	txn_wait_links *wlinks;
-	struct list_head *pos;
 
 	assert("umka-210", atom != NULL);
 
 	/* atom is locked */
-	list_for_each(pos, &atom->fwaitfor_list) {
-		wlinks = list_entry(pos, txn_wait_links, _fwaitfor_link);
+	list_for_each_entry(wlinks, &atom->fwaitfor_list, _fwaitfor_link) {
 		if (wlinks->waitfor_cb == NULL ||
 		    wlinks->waitfor_cb(atom, wlinks))
 			/* Wake up. */
@@ -3128,13 +3115,11 @@ static void wakeup_atom_waitfor_list(txn
 static void wakeup_atom_waiting_list(txn_atom * atom)
 {
 	txn_wait_links *wlinks;
-	struct list_head *pos;
 
 	assert("umka-211", atom != NULL);
 
 	/* atom is locked */
-	list_for_each(pos, &atom->fwaiting_list) {
-		wlinks = list_entry(pos, txn_wait_links, _fwaiting_link);
+	list_for_each_entry(wlinks, &atom->fwaiting_list, _fwaiting_link) {
 		if (wlinks->waiting_cb == NULL ||
 		    wlinks->waiting_cb(atom, wlinks))
 			/* Wake up. */
@@ -3343,7 +3328,6 @@ capture_fuse_jnode_lists(txn_atom *large
 {
 	int count = 0;
 	jnode *node;
-	struct list_head *pos;
 
 	assert("umka-218", large != NULL);
 	assert("umka-219", large_head != NULL);
@@ -3352,8 +3336,7 @@ capture_fuse_jnode_lists(txn_atom *large
 	assert("zam-968", spin_atom_is_locked(large));
 
 	/* For every jnode on small's capture list... */
-	list_for_each(pos, small_head) {
-		node = list_entry(pos, jnode, capture_link);
+	list_for_each_entry(node, small_head, capture_link) {
 		count += 1;
 
 		/* With the jnode lock held, update atom pointer. */
@@ -3376,15 +3359,13 @@ capture_fuse_txnh_lists(txn_atom *large,
 {
 	int count = 0;
 	txn_handle *txnh;
-	struct list_head *pos;
 
 	assert("umka-221", large != NULL);
 	assert("umka-222", large_head != NULL);
 	assert("umka-223", small_head != NULL);
 
 	/* Adjust every txnh to the new atom. */
-	list_for_each(pos, small_head) {
-		txnh = list_entry(pos, txn_handle, txnh_link);
+	list_for_each_entry(txnh, small_head, txnh_link) {
 		count += 1;
 
 		/* With the txnh lock held, update atom pointer. */
@@ -3410,7 +3391,6 @@ static void capture_fuse_into(txn_atom *
 	unsigned zcount = 0;
 	unsigned tcount = 0;
 	protected_jnodes *prot_list;
-	struct list_head *pos1, *pos2;
 
 	assert("umka-224", small != NULL);
 	assert("umka-225", small != NULL);
@@ -3445,14 +3425,10 @@ static void capture_fuse_into(txn_atom *
 	    capture_fuse_txnh_lists(large, &large->txnh_list,
 				    &small->txnh_list);
 
-
-	list_for_each(pos1, &small->protected) {
+	list_for_each_entry(prot_list, &small->protected, inatom) {
 		jnode *node;
 
-		prot_list = list_entry(pos1, protected_jnodes, inatom);
-
-		list_for_each(pos2, &prot_list->nodes) {
-			node = list_entry(pos2, jnode, capture_link);
+		list_for_each_entry(node, &prot_list->nodes, capture_link) {
 			zcount += 1;
 
 			LOCK_JNODE(node);
@@ -4228,13 +4204,11 @@ reiser4_block_nr txnmgr_count_deleted_bl
 	reiser4_block_nr result;
 	txn_mgr *tmgr = &get_super_private(reiser4_get_current_sb())->tmgr;
 	txn_atom *atom;
-	struct list_head *pos;
 
 	result = 0;
 
 	spin_lock_txnmgr(tmgr);
-	list_for_each(pos, &tmgr->atoms_list) {
-		atom = list_entry(pos, txn_atom, atom_link);
+	list_for_each_entry(atom, &tmgr->atoms_list, atom_link) {
 		LOCK_ATOM(atom);
 		blocknr_set_iterator(atom, &atom->delete_set,
 				     count_deleted_blocks_actor, &result, 0);
diff -puN fs/reiser4/wander.c~reiser4-use-for_each_list_entry fs/reiser4/wander.c
--- linux-2.6.14-rc1-mm1/fs/reiser4/wander.c~reiser4-use-for_each_list_entry	2005-09-19 13:20:33.000000000 +0400
+++ linux-2.6.14-rc1-mm1-vs/fs/reiser4/wander.c	2005-09-19 13:20:33.000000000 +0400
@@ -1074,12 +1074,9 @@ static int alloc_wandered_blocks(struct 
 static void put_overwrite_set(struct commit_handle *ch)
 {
 	jnode *cur;
-	struct list_head *pos;
 
-	list_for_each(pos, ch->overwrite_set) {
-		cur = list_entry(pos, jnode, capture_link);
+	list_for_each_entry(cur, ch->overwrite_set, capture_link)
 		jrelse_tail(cur);
-	}
 }
 
 /* Count overwrite set size, grab disk space for wandered blocks allocation.
@@ -1750,11 +1747,10 @@ static int check_journal_header(const jn
 /* wait for write completion for all jnodes from given list */
 static int wait_on_jnode_list(struct list_head *head)
 {
-	struct list_head *pos;
+	jnode *scan;
 	int ret = 0;
 
-	list_for_each(pos, head) {
-		jnode *scan = list_entry(pos, jnode, capture_link);
+	list_for_each_entry(scan, head, capture_link) {
 		struct page *pg = jnode_page(scan);
 
 		if (pg) {
diff -puN fs/reiser4/plugin/file_ops_readdir.c~reiser4-use-for_each_list_entry fs/reiser4/plugin/file_ops_readdir.c
--- linux-2.6.14-rc1-mm1/fs/reiser4/plugin/file_ops_readdir.c~reiser4-use-for_each_list_entry	2005-09-19 13:20:33.000000000 +0400
+++ linux-2.6.14-rc1-mm1-vs/fs/reiser4/plugin/file_ops_readdir.c	2005-09-19 13:20:33.000000000 +0400
@@ -103,7 +103,6 @@ adjust_dir_file(struct inode *dir, const
 {
 	reiser4_file_fsdata *scan;
 	dir_pos mod_point;
-	struct list_head *pos;
 
 	assert("nikita-2536", dir != NULL);
 	assert("nikita-2538", de != NULL);
@@ -120,10 +119,8 @@ adjust_dir_file(struct inode *dir, const
 	 * update them.
 	 */
 
-	list_for_each(pos, get_readdir_list(dir)) {
-		scan = list_entry(pos, reiser4_file_fsdata, dir.linkage);
+	list_for_each_entry(scan, get_readdir_list(dir), dir.linkage)
 		adjust_dir_pos(scan->back, &scan->dir.readdir, &mod_point, adj);
-	}
 
 	spin_unlock_inode(dir);
 }
diff -puN fs/reiser4/plugin/item/extent_flush_ops.c~reiser4-use-for_each_list_entry fs/reiser4/plugin/item/extent_flush_ops.c
--- linux-2.6.14-rc1-mm1/fs/reiser4/plugin/item/extent_flush_ops.c~reiser4-use-for_each_list_entry	2005-09-19 13:20:33.000000000 +0400
+++ linux-2.6.14-rc1-mm1-vs/fs/reiser4/plugin/item/extent_flush_ops.c	2005-09-19 13:20:33.000000000 +0400
@@ -699,7 +699,6 @@ assign_real_blocknrs(flush_pos_t *flush_
 	jnode *node;
 	txn_atom *atom;
 	flush_queue_t *fq;
-	struct list_head *pos;
 	int i;
 
 	fq = pos_fq(flush_pos);
@@ -707,8 +706,7 @@ assign_real_blocknrs(flush_pos_t *flush_
 	assert("vs-1468", atom);
 
 	i = 0;
-	list_for_each(pos, protected_nodes) {
-		node = list_entry(pos, jnode, capture_link);
+	list_for_each_entry(node, protected_nodes, capture_link) {
 		LOCK_JNODE(node);
 		assert("vs-1132",
 		       ergo(state == UNALLOCATED_EXTENT,

_
