patch-2.0.21-2.1.0 linux/fs/locks.c

diff -u --recursive --new-file lx2.0/v2.0.21/linux/fs/locks.c linux/fs/locks.c
@@ -28,22 +28,22 @@
  *  dynamically with kmalloc()/kfree().
  *  Andy Walker (andy@lysaker.kvaerner.no), February 21, 1995
  *
- *  Implemented two lock personalities - F_FLOCK and F_POSIX.
+ *  Implemented two lock personalities - FL_FLOCK and FL_POSIX.
  *
- *  F_POSIX locks are created with calls to fcntl() and lockf() through the
+ *  FL_POSIX locks are created with calls to fcntl() and lockf() through the
  *  fcntl() system call. They have the semantics described above.
  *
- *  F_FLOCK locks are created with calls to flock(), through the flock()
+ *  FL_FLOCK locks are created with calls to flock(), through the flock()
  *  system call, which is new. Old C libraries implement flock() via fcntl()
  *  and will continue to use the old, broken implementation.
  *
- *  F_FLOCK locks follow the 4.4 BSD flock() semantics. They are associated
+ *  FL_FLOCK locks follow the 4.4 BSD flock() semantics. They are associated
  *  with a file pointer (filp). As a result they can be shared by a parent
  *  process and its children after a fork(). They are removed when the last
  *  file descriptor referring to the file pointer is closed (unless explicitly
  *  unlocked). 
  *
- *  F_FLOCK locks never deadlock, an existing lock is always removed before
+ *  FL_FLOCK locks never deadlock, an existing lock is always removed before
  *  upgrading from shared to exclusive (or vice versa). When this happens
  *  any processes blocked by the current lock are woken up and allowed to
  *  run before the new lock is applied.
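
As a userspace illustration of the FL_FLOCK semantics described above (not
part of the patch; file name and error handling are placeholders):

#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <fcntl.h>
#include <sys/file.h>
#include <sys/wait.h>

int main(void)
{
	int fd = open("/tmp/flock-demo", O_RDWR | O_CREAT, 0644);
	if (fd < 0 || flock(fd, LOCK_EX) < 0) {
		perror("flock-demo");
		exit(1);
	}
	if (fork() == 0) {
		/* The child shares the lock through the inherited file
		 * pointer; the lock survives until the last descriptor
		 * referring to that file pointer is closed.
		 */
		_exit(0);
	}
	wait(NULL);
	/* Converting LOCK_EX to LOCK_SH first removes the old lock, so
	 * another process may slip in between - this is why FL_FLOCK
	 * locks never deadlock on conversion.
	 */
	flock(fd, LOCK_SH);
	close(fd);
	return 0;
}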
@@ -76,15 +76,24 @@
  *  flock() and fcntl().
  *  Andy Walker (andy@lysaker.kvaerner.no), April 29, 1996.
  *
- *  Allow only one type of locking scheme (F_POSIX or F_FLOCK) to be in use
- *  for a given file at a time. Changed the CONFIG_MANDATORY_OPTION scheme to
+ *  Allow only one type of locking scheme (FL_POSIX or FL_FLOCK) to be in use
+ *  for a given file at a time. Changed the CONFIG_LOCK_MANDATORY scheme to
  *  guarantee sensible behaviour in the case where file system modules might
  *  be compiled with different options than the kernel itself.
  *  Andy Walker (andy@lysaker.kvaerner.no), May 15, 1996.
  *
- *  Added a couple of missing wake_up() calls.
+ *  Added a couple of missing wake_up() calls. Thanks to Thomas Meckel
+ *  (Thomas.Meckel@mni.fh-giessen.de) for spotting this.
  *  Andy Walker (andy@lysaker.kvaerner.no), May 15, 1996.
  *
+ *  Changed FL_POSIX locks to use the block list in the same way as FL_FLOCK
+ *  locks. Changed process synchronisation to avoid dereferencing locks that
+ *  have already been freed.
+ *  Andy Walker (andy@lysaker.kvaerner.no), Sep 21, 1996.
+ *
+ *  Made the block list a circular list to minimise searching in the list.
+ *  Andy Walker (andy@lysaker.kvaerner.no), Sep 25, 1996.
+ *
  *  TODO: Do not honour mandatory locks on remote file systems. This matches
  *        the SVR4 semantics and neatly sidesteps a pile of awkward issues that
  *        would otherwise have to be addressed.
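
The Sep 25 entry above is the heart of this patch: the block list becomes a
circular, doubly linked list so that a waiter can be unlinked without
searching. A minimal standalone sketch of the same structure (the field
names follow the patch; everything else is illustrative):

struct file_lock {
	struct file_lock *fl_nextblock;	/* circular list of blocked locks */
	struct file_lock *fl_prevblock;
};

/* Append waiter at the tail of blocker's block list. An empty list is
 * encoded as two NULL pointers, exactly as in the patch below.
 */
static void insert_block(struct file_lock *blocker, struct file_lock *waiter)
{
	struct file_lock *prev =
		blocker->fl_prevblock ? blocker->fl_prevblock : blocker;

	prev->fl_nextblock = waiter;
	blocker->fl_prevblock = waiter;
	waiter->fl_nextblock = blocker;
	waiter->fl_prevblock = prev;
}

/* Unlink waiter in O(1) - the reason for the doubly linked form. */
static void delete_block(struct file_lock *blocker, struct file_lock *waiter)
{
	if (waiter->fl_nextblock == NULL)
		return;
	waiter->fl_nextblock->fl_prevblock = waiter->fl_prevblock;
	waiter->fl_prevblock->fl_nextblock = waiter->fl_nextblock;
	waiter->fl_nextblock = waiter->fl_prevblock = NULL;
	if (blocker->fl_nextblock == blocker)
		blocker->fl_nextblock = blocker->fl_prevblock = NULL;
}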
@@ -124,70 +133,101 @@
 
 static struct file_lock *locks_alloc_lock(struct file_lock *fl);
 static void locks_insert_lock(struct file_lock **pos, struct file_lock *fl);
-static void locks_delete_lock(struct file_lock **fl, unsigned int wait);
+static void locks_delete_lock(struct file_lock **thisfl_p, unsigned int wait);
 static char *lock_get_status(struct file_lock *fl, char *p, int id, char *pfx);
 
 static struct file_lock *file_lock_table = NULL;
-static struct file_lock *unused_file_locks = NULL;
 
-/*
- * Free lock not inserted in any queue
- *
- * Careful! We can't just "kfree()" it: there may be other processes
- * that have yet to remove themselves from the wait queues. Thus the
- * internal memory management.
+/* Free lock not inserted in any queue.
  */
 static inline void locks_free_lock(struct file_lock *fl)
 {
-	struct file_lock *next = unused_file_locks;
-	unused_file_locks = fl;
-	fl->fl_next = next;
-}
-
-/* Add lock fl to the blocked list pointed to by block.
- * We search to the end of the existing list and insert the the new
- * struct. This ensures processes will be woken up in the order they
- * blocked.
- * NOTE: nowhere does the documentation insist that processes be woken
- * up in this order, but it seems like the reasonable thing to do.
- * If the blocked list gets long then this search could get expensive,
- * in which case we could consider waking the processes up in reverse
- * order, or making the blocked list a doubly linked circular list.
- * 
- * This functions are called only from one place (flock_lock_file)
- * so they are inlined now. -- Dmitry Gorodchanin 02/09/96.
- */
-
-static inline void locks_insert_block(struct file_lock *bfl, 
-				      struct file_lock *fl)
+	if (waitqueue_active(&fl->fl_wait))
+		panic("Aarggh: attempting to free lock with active wait queue - shoot Andy");
+
+	if (fl->fl_nextblock != NULL || fl->fl_prevblock != NULL)
+		panic("Aarggh: attempting to free lock with active block list - shoot Andy");
+		
+	kfree(fl);
+	return;
+}
+
+/* Insert waiter into blocker's block list.
+ * We use a circular list so that processes can be easily woken up in
+ * the order they blocked. The documentation doesn't require this but
+ * it seems like the reasonable thing to do.
+ */
+static inline void locks_insert_block(struct file_lock *blocker, 
+				      struct file_lock *waiter)
 {
-	while (bfl->fl_block != NULL) {
-		bfl = bfl->fl_block;
-	}
+	struct file_lock *prevblock;
 
-	bfl->fl_block = fl;
-	fl->fl_block = NULL;
+	if (blocker->fl_prevblock == NULL)
+		/* No previous waiters - list is empty */
+		prevblock = blocker;
+	else
+		/* Previous waiters exist - add to end of list */
+		prevblock = blocker->fl_prevblock;
+
+	prevblock->fl_nextblock = waiter;
+	blocker->fl_prevblock = waiter;
+	waiter->fl_nextblock = blocker;
+	waiter->fl_prevblock = prevblock;
 	
 	return;
 }
 
-static inline void locks_delete_block(struct file_lock *bfl,
-				      struct file_lock *fl)
+/* Remove waiter from blocker's block list.
+ * When blocker ends up pointing to itself then the list is empty.
+ */
+static inline void locks_delete_block(struct file_lock *blocker,
+				      struct file_lock *waiter)
 {
-	struct file_lock *tfl;
+	struct file_lock *nextblock;
+	struct file_lock *prevblock;
 	
-	while ((tfl = bfl->fl_block) != NULL) {
-		if (tfl == fl) {
-			bfl->fl_block = fl->fl_block;
-			fl->fl_block = NULL;
-			return;
-		}
-		bfl = tfl;
+	nextblock = waiter->fl_nextblock;
+	prevblock = waiter->fl_prevblock;
+
+	if (nextblock == NULL)
+		return;
+	
+	nextblock->fl_prevblock = prevblock;
+	prevblock->fl_nextblock = nextblock;
+
+	waiter->fl_prevblock = waiter->fl_nextblock = NULL;
+	if (blocker->fl_nextblock == blocker)
+		/* No more locks on blocker's blocked list */
+		blocker->fl_prevblock = blocker->fl_nextblock = NULL;
+	return;
+}
+
+/* Wake up processes blocked waiting for blocker.
+ * If told to wait then schedule the processes until the block list
+ * is empty, otherwise empty the block list ourselves.
+ */
+static inline void locks_wake_up_blocks(struct file_lock *blocker,
+					unsigned int wait)
+{
+	struct file_lock *waiter;
+
+	while ((waiter = blocker->fl_nextblock) != NULL) {
+		wake_up(&waiter->fl_wait);
+		if (wait)
+			/* Let the blocked process remove waiter from the
+			 * block list when it gets scheduled.
+			 */
+			schedule();
+		else
+			/* Remove waiter from the block list, because by the
+			 * time it wakes up blocker won't exist any more.
+			 */
+			locks_delete_block(blocker, waiter);
 	}
 	return;
 }
 
-/* flock() system call entry point. Apply a FLOCK style lock to
+/* flock() system call entry point. Apply a FL_FLOCK style lock to
  * an open file descriptor.
  */
 asmlinkage int sys_flock(unsigned int fd, unsigned int cmd)
@@ -203,8 +243,8 @@
 	
 	if ((file_lock.fl_type != F_UNLCK) && !(filp->f_mode & 3))
 		return (-EBADF);
-	
-	return (flock_lock_file(filp, &file_lock, cmd & LOCK_UN ? 0 : cmd & LOCK_NB ? 0 : 1));
+
+	return (flock_lock_file(filp, &file_lock, (cmd & (LOCK_UN | LOCK_NB)) ? 0 : 1));
 }
 
 /* Report the first existing lock that would conflict with l.
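
Rewriting the wait argument as (cmd & (LOCK_UN | LOCK_NB)) ? 0 : 1 makes the
rule explicit: only a request that is neither an unlock nor marked
non-blocking may sleep. From userspace this is the familiar LOCK_NB
behaviour (illustrative sketch; fd is assumed to be an open descriptor):

#include <errno.h>
#include <stdio.h>
#include <sys/file.h>

int try_lock(int fd)
{
	/* With LOCK_NB the kernel calls flock_lock_file() with wait == 0,
	 * so a conflicting lock fails immediately instead of blocking.
	 */
	if (flock(fd, LOCK_EX | LOCK_NB) < 0) {
		if (errno == EWOULDBLOCK)
			printf("file is locked elsewhere\n");
		return -1;
	}
	return 0;
}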
@@ -231,7 +271,7 @@
 	if (!filp->f_inode || !posix_make_lock(filp, &file_lock, &flock))
 		return (-EINVAL);
 
-	if ((fl = filp->f_inode->i_flock) && (fl->fl_flags & F_POSIX)) { 
+	if ((fl = filp->f_inode->i_flock) && (fl->fl_flags & FL_POSIX)) { 
 		while (fl != NULL) {
 			if (posix_locks_conflict(&file_lock, fl)) {
 				flock.l_pid = fl->fl_owner->pid;
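
The hunk above is in the F_GETLK path: when a conflict is found, the
conflicting owner's pid is copied into the user's struct flock. A userspace
probe might look like this (illustrative; fd assumed open):

#include <fcntl.h>
#include <stdio.h>

void who_holds_it(int fd)
{
	struct flock probe;

	probe.l_type = F_WRLCK;		/* would a write lock be granted? */
	probe.l_whence = SEEK_SET;
	probe.l_start = 0;
	probe.l_len = 0;		/* 0 means to end of file */

	if (fcntl(fd, F_GETLK, &probe) == 0 && probe.l_type != F_UNLCK)
		printf("blocked by pid %d\n", (int)probe.l_pid);
}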
@@ -283,7 +323,8 @@
 	/* Don't allow mandatory locks on files that may be memory mapped
 	 * and shared.
 	 */
-	if ((inode->i_mode & (S_ISGID | S_IXGRP)) == S_ISGID && inode->i_mmap) {
+	if ((inode->i_mode & (S_ISGID | S_IXGRP)) == S_ISGID &&
+	    inode->i_mmap) {
 		struct vm_area_struct *vma = inode->i_mmap;
 		do {
 			if (vma->vm_flags & VM_MAYSHARE)
@@ -343,7 +384,7 @@
 	 * close on that file.
 	 */
 	if ((fl = filp->f_inode->i_flock) != NULL) {
-		if (fl->fl_flags & F_POSIX)
+		if (fl->fl_flags & FL_POSIX)
 			posix_remove_locks(&filp->f_inode->i_flock, task);
 		else
 			flock_remove_locks(&filp->f_inode->i_flock, filp);
@@ -395,14 +436,14 @@
 int locks_verify_area(int read_write, struct inode *inode, struct file *filp,
 		      unsigned int offset, unsigned int count)
 {
-#ifdef CONFIG_LOCK_MANDATORY	 
+#ifdef CONFIG_LOCK_MANDATORY
 	/* Candidates for mandatory locking have the setgid bit set
 	 * but no group execute bit -  an otherwise meaningless combination.
 	 */
 	if ((inode->i_mode & (S_ISGID | S_IXGRP)) == S_ISGID)
 		return (locks_mandatory_area(read_write, inode, filp, offset,
 					     count));
-#endif					     
+#endif
 	return (0);
 }
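
As the comment says, mandatory-locking candidates are files with the setgid
bit set but group execute clear. Marking a file that way from a program is
just a chmod (illustrative sketch; the path is a placeholder):

#include <sys/stat.h>

int enable_mandatory(const char *path)
{
	/* S_ISGID without S_IXGRP - the "otherwise meaningless
	 * combination" that locks_verify_area() tests for.
	 * Equivalent to: chmod 2644 path
	 */
	return chmod(path, S_ISGID | S_IRUSR | S_IWUSR | S_IRGRP | S_IROTH);
}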
 
@@ -413,7 +454,7 @@
 
 	/* Search the lock list for this inode for any POSIX locks.
 	 */
-	if ((fl = inode->i_flock) && (fl->fl_flags & F_FLOCK))
+	if ((fl = inode->i_flock) == NULL || (fl->fl_flags & FL_FLOCK))
 		return (0);
 	
 	while (fl != NULL) {
@@ -431,11 +472,25 @@
 {
 #ifdef CONFIG_LOCK_MANDATORY	
 	struct file_lock *fl;
+	struct file_lock tfl;
+
+	tfl.fl_file = filp;
+	tfl.fl_nextlink = NULL;
+	tfl.fl_prevlink = NULL;
+	tfl.fl_next = NULL;
+	tfl.fl_nextblock = NULL;
+	tfl.fl_prevblock = NULL;
+	tfl.fl_flags = FL_POSIX | FL_ACCESS;
+	tfl.fl_owner = current;
+	tfl.fl_wait = NULL;
+	tfl.fl_type = (read_write == FLOCK_VERIFY_WRITE) ? F_WRLCK : F_RDLCK;
+	tfl.fl_start = offset;
+	tfl.fl_end = offset + count - 1;
 
 repeat:
-	/* Check that there are locks, and that they're not F_FLOCK locks.
+	/* Check that there are locks, and that they're not FL_FLOCK locks.
 	 */
-	if ((fl = inode->i_flock) && (fl->fl_flags & F_FLOCK))
+	if ((fl = inode->i_flock) == NULL || (fl->fl_flags & FL_FLOCK))
 		return (0);
 	
 	/*
@@ -459,7 +514,11 @@
 				return (-ERESTARTSYS);
 			if (posix_locks_deadlock(current, fl->fl_owner))
 				return (-EDEADLK);
-			interruptible_sleep_on(&fl->fl_wait);
+
+			locks_insert_block(fl, &tfl);
+			interruptible_sleep_on(&tfl.fl_wait);
+			locks_delete_block(fl, &tfl);
+
 			if (current->signal & ~current->blocked)
 				return (-ERESTARTSYS);
 			/*
@@ -485,7 +544,7 @@
 {
 	off_t start;
 
-	fl->fl_flags = F_POSIX;
+	fl->fl_flags = FL_POSIX;
 
 	switch (l->l_type) {
 	case F_RDLCK :
@@ -495,11 +554,11 @@
 		break;
 	case F_SHLCK :
 		fl->fl_type = F_RDLCK;
-		fl->fl_flags |= F_BROKEN;
+		fl->fl_flags |= FL_BROKEN;
 		break;
 	case F_EXLCK :
 		fl->fl_type = F_WRLCK;
-		fl->fl_flags |= F_BROKEN;
+		fl->fl_flags |= FL_BROKEN;
 		break;
 	default :
 		return (0);
@@ -528,7 +587,7 @@
 	fl->fl_file = filp;
 	fl->fl_owner = current;
 	fl->fl_wait = NULL;		/* just for cleanliness */
-	
+
 	return (1);
 }
 
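posix_make_lock() (whose tail is shown above) converts the user's struct
flock into the inclusive byte range [fl_start, fl_end] that the rest of this
file works with. A worked example (illustrative; fd assumed open):

#include <fcntl.h>

int lock_bytes_100_to_199(int fd)
{
	struct flock fl;

	fl.l_type = F_WRLCK;
	fl.l_whence = SEEK_SET;	/* l_start is an absolute offset */
	fl.l_start = 100;
	fl.l_len = 100;		/* 100 bytes: the kernel range becomes
				 * fl_start = 100, fl_end = 199;
				 * l_len == 0 would mean "to end of
				 * file", i.e. fl_end = OFFSET_MAX. */

	return fcntl(fd, F_SETLKW, &fl);	/* F_SETLKW may block */
}
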
@@ -555,7 +614,7 @@
 		return (0);
 	}
 
-	fl->fl_flags = F_FLOCK;
+	fl->fl_flags = FL_FLOCK;
 	fl->fl_start = 0;
 	fl->fl_end = OFFSET_MAX;
 	fl->fl_file = filp;
@@ -637,27 +696,23 @@
 static int posix_locks_deadlock(struct task_struct *my_task,
 				struct task_struct *blocked_task)
 {
-	struct wait_queue *dlock_wait;
 	struct file_lock *fl;
+	struct file_lock *bfl;
 
 next_task:
 	if (my_task == blocked_task)
 		return (1);
 	for (fl = file_lock_table; fl != NULL; fl = fl->fl_nextlink) {
-		struct wait_queue *head;
-		if (fl->fl_owner == NULL || fl->fl_wait == NULL)
+		if (fl->fl_owner == NULL || fl->fl_nextblock == NULL)
 			continue;
-		head = WAIT_QUEUE_HEAD(&fl->fl_wait);
-		dlock_wait = fl->fl_wait;
-		while (dlock_wait != head) {
-			if (dlock_wait->task == blocked_task) {
+		for (bfl = fl->fl_nextblock; bfl != fl; bfl = bfl->fl_nextblock) {
+			if (bfl->fl_owner == blocked_task) {
 				if (fl->fl_owner == my_task) {
 					return (1);
 				}
 				blocked_task = fl->fl_owner;
 				goto next_task;
 			}
-			dlock_wait = dlock_wait->next;
 		}
 	}
 	return (0);
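
Concretely (an illustrative scenario, not from the patch): suppose task A
owns lock L1 and is parked on the block list of L2, which task B owns. If B
now requests a range covered by L1, posix_lock_file() calls
posix_locks_deadlock(B, A). The rewritten loop walks file_lock_table, finds
A among L2's waiters by following the circular fl_nextblock chain, and sees
that L2's owner is B itself - a cycle, so 1 is returned and the request
fails with -EDEADLK. Note that the walk now inspects the blocked file_lock
entries directly instead of picking through raw wait queues.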
@@ -676,7 +731,7 @@
 
 	before = &filp->f_inode->i_flock;
 
-	if ((fl = *before) && (fl->fl_flags & F_POSIX))
+	if ((fl = *before) && (fl->fl_flags & FL_POSIX))
 		return (-EBUSY);
 
 	while ((fl = *before) != NULL) {
@@ -698,7 +753,7 @@
 	if ((new_fl = locks_alloc_lock(caller)) == NULL)
 		return (-ENOLCK);
 repeat:
-	if ((fl = filp->f_inode->i_flock) && (fl->fl_flags & F_POSIX)) {
+	if ((fl = filp->f_inode->i_flock) && (fl->fl_flags & FL_POSIX)) {
 		locks_free_lock(new_fl);
 		return (-EBUSY);
 	}
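
Because only one personality may be active on a file at a time (see the
May 15 changelog entry), flock_lock_file() refuses with -EBUSY when FL_POSIX
locks are present, and posix_lock_file() makes the symmetric check against
FL_FLOCK below. From userspace, mixing the two interfaces on one file under
this scheme might look like (illustrative sketch; fd assumed open):

#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <sys/file.h>

int mix_personalities(int fd)
{
	struct flock fl;

	if (flock(fd, LOCK_SH) < 0)	/* take an FL_FLOCK lock */
		return -1;

	fl.l_type = F_RDLCK;
	fl.l_whence = SEEK_SET;
	fl.l_start = 0;
	fl.l_len = 0;

	/* The FL_POSIX request collides with the lock personality
	 * already in use on this file and is refused.
	 */
	if (fcntl(fd, F_SETLK, &fl) < 0 && errno == EBUSY)
		printf("POSIX lock refused: file uses flock()\n");
	return 0;
}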
@@ -720,7 +775,7 @@
 			}
 			locks_insert_block(fl, new_fl);
 			interruptible_sleep_on(&new_fl->fl_wait);
-			wake_up(&new_fl->fl_wait);
+			locks_delete_block(fl, new_fl);
 			if (current->signal & ~current->blocked) {
 				/* If we are here, then we were awakened
 				 * by a signal, so new_fl is still in the
@@ -728,7 +783,6 @@
 				 * new_fl and then free it.
 				 * 	Dmitry Gorodchanin 09/02/96.
 				 */
-				locks_delete_block(fl, new_fl);
 				locks_free_lock(new_fl);
 				return (-ERESTARTSYS);
 			}
@@ -763,7 +817,7 @@
 	int added = 0;
 
 repeat:
-	if ((fl = filp->f_inode->i_flock) && (fl->fl_flags & F_FLOCK))
+	if ((fl = filp->f_inode->i_flock) && (fl->fl_flags & FL_FLOCK))
 		return (-EBUSY);
 
 	if (caller->fl_type != F_UNLCK) {
@@ -775,7 +829,9 @@
 					return (-ERESTARTSYS);
 				if (posix_locks_deadlock(caller->fl_owner, fl->fl_owner))
 					return (-EDEADLK);
-				interruptible_sleep_on(&fl->fl_wait);
+				locks_insert_block(fl, caller);
+				interruptible_sleep_on(&caller->fl_wait);
+				locks_delete_block(fl, caller);
 				if (current->signal & ~current->blocked)
 					return (-ERESTARTSYS);
 				goto repeat;
@@ -862,7 +918,7 @@
 				 * as the change in lock type might satisfy
 				 * their needs.
 				 */
-				wake_up(&fl->fl_wait);
+				locks_wake_up_blocks(fl, 0);
 				fl->fl_start = caller->fl_start;
 				fl->fl_end = caller->fl_end;
 				fl->fl_type = caller->fl_type;
@@ -897,52 +953,47 @@
 			locks_insert_lock(before, left);
 		}
 		right->fl_start = caller->fl_end + 1;
-		wake_up(&right->fl_wait);
+		locks_wake_up_blocks(right, 0);
 	}
 	if (left) {
 		left->fl_end = caller->fl_start - 1;
-		wake_up(&left->fl_wait);
+		locks_wake_up_blocks(left, 0);
 	}
 	return (0);
 }
 
-/* Allocate memory for a new lock and initialize its fields from
- * fl. The lock is not inserted into any lists until locks_insert_lock()
- * or locks_insert_block() are called.
+/* Allocate new lock.
+ * Initialize its fields from fl. The lock is not inserted into any
+ * lists until locks_insert_lock() or locks_insert_block() are called.
  */
-
 static struct file_lock *locks_alloc_lock(struct file_lock *fl)
 {
-	struct file_lock *retval;
+	struct file_lock *tmp;
 
-	retval = unused_file_locks;
-	if (retval) {
-		unused_file_locks = retval->fl_next;
-		goto init_file_lock;
-	}
-	retval = (struct file_lock *)
-		kmalloc(sizeof(struct file_lock), GFP_ATOMIC);
-	if (retval) {
-		retval->fl_wait = NULL;
-init_file_lock:
-		retval->fl_next = NULL;
-		retval->fl_nextlink = NULL;
-		retval->fl_prevlink = NULL;
-		retval->fl_block = NULL;
-		retval->fl_owner = fl->fl_owner;
-		retval->fl_file = fl->fl_file;
-		retval->fl_flags = fl->fl_flags;
-		retval->fl_type = fl->fl_type;
-		retval->fl_start = fl->fl_start;
-		retval->fl_end = fl->fl_end;		
-	}
-	return retval;
+	/* Okay, let's make a new file_lock structure... */
+	if ((tmp = (struct file_lock *)kmalloc(sizeof(struct file_lock),
+					       GFP_ATOMIC)) == NULL)
+		return (tmp);
+
+	tmp->fl_nextlink = NULL;
+	tmp->fl_prevlink = NULL;
+	tmp->fl_next = NULL;
+	tmp->fl_nextblock = NULL;
+	tmp->fl_prevblock = NULL;
+	tmp->fl_flags = fl->fl_flags;
+	tmp->fl_owner = fl->fl_owner;
+	tmp->fl_file = fl->fl_file;
+	tmp->fl_wait = NULL;
+	tmp->fl_type = fl->fl_type;
+	tmp->fl_start = fl->fl_start;
+	tmp->fl_end = fl->fl_end;
+
+	return (tmp);
 }
 
 /* Insert file lock fl into an inode's lock list at the position indicated
  * by pos. At the same time add the lock to the global file lock list.
  */
-
 static void locks_insert_lock(struct file_lock **pos, struct file_lock *fl)
 {
 	fl->fl_nextlink = file_lock_table;
@@ -957,43 +1008,32 @@
 }
 
 /* Delete a lock and free it.
- * First remove our lock from the lock lists. Then remove all the blocked
- * locks from our blocked list, waking up the processes that own them. If
- * told to wait, then sleep on each of these lock's wait queues. Each
- * blocked process will wake up and immediately wake up its own wait queue
- * allowing us to be scheduled again. Lastly, wake up our own wait queue
- * before freeing the file_lock structure.
+ * First remove our lock from the active lock lists. Then call
+ * locks_wake_up_blocks() to wake up processes that are blocked
+ * waiting for this lock. Finally free the lock structure.
  */
-
-static void locks_delete_lock(struct file_lock **fl_p, unsigned int wait)
+static void locks_delete_lock(struct file_lock **thisfl_p, unsigned int wait)
 {
-	struct file_lock *fl;
-	struct file_lock *pfl;
-	struct file_lock *nfl;
+	struct file_lock *thisfl;
+	struct file_lock *prevfl;
+	struct file_lock *nextfl;
 	
-	fl = *fl_p;
-	*fl_p = fl->fl_next;
-	pfl = fl->fl_prevlink;
-	nfl = fl->fl_nextlink;
+	thisfl = *thisfl_p;
+	*thisfl_p = thisfl->fl_next;
+
+	prevfl = thisfl->fl_prevlink;
+	nextfl = thisfl->fl_nextlink;
 
-	if (nfl != NULL)
-		nfl->fl_prevlink = pfl;
+	if (nextfl != NULL)
+		nextfl->fl_prevlink = prevfl;
 
-	if (pfl != NULL)
-		pfl->fl_nextlink = nfl;
+	if (prevfl != NULL)
+		prevfl->fl_nextlink = nextfl;
 	else
-		file_lock_table = nfl;
+		file_lock_table = nextfl;
 	
-	while ((nfl = fl->fl_block) != NULL) {
-		fl->fl_block = nfl->fl_block;
-		nfl->fl_block = NULL;
-		wake_up(&nfl->fl_wait);
-		if (wait)
-			sleep_on(&nfl->fl_wait);
-	}
-
-	wake_up(&fl->fl_wait);
-	locks_free_lock(fl);
+	locks_wake_up_blocks(thisfl, wait);
+	locks_free_lock(thisfl);
 
 	return;
 }
@@ -1001,18 +1041,21 @@
 
 static char *lock_get_status(struct file_lock *fl, char *p, int id, char *pfx)
 {
-	struct wait_queue *wt;
+	struct inode *inode;
+
+	inode = fl->fl_file->f_inode;
 
 	p += sprintf(p, "%d:%s ", id, pfx);
-	if (fl->fl_flags & F_POSIX) {
+	if (fl->fl_flags & FL_POSIX) {
 #ifdef CONFIG_LOCK_MANDATORY	 
 		p += sprintf(p, "%s %s ",
-			     (fl->fl_flags & F_BROKEN) ? "BROKEN" : "POSIX ",
-			     ((fl->fl_file->f_inode->i_mode & (S_IXGRP | S_ISGID))
-			      == S_ISGID) ? "MANDATORY" : "ADVISORY ");
+			     (fl->fl_flags & FL_ACCESS) ? "ACCESS" :
+			     ((fl->fl_flags & FL_BROKEN) ? "BROKEN" : "POSIX "),
+			     ((inode->i_mode & (S_IXGRP | S_ISGID)) == S_ISGID) ?
+			     "MANDATORY" : "ADVISORY ");
 #else
 		p += sprintf(p, "%s ADVISORY ",
-			     (fl->fl_flags & F_BROKEN) ? "BROKEN" : "POSIX ");
+			     (fl->fl_flags & FL_BROKEN) ? "BROKEN" : "POSIX ");
 #endif
 	}
 	else {
@@ -1021,20 +1064,11 @@
 	p += sprintf(p, "%s ", (fl->fl_type == F_RDLCK) ? "READ " : "WRITE");
 	p += sprintf(p, "%d %s:%ld %ld %ld ",
 		     fl->fl_owner ? fl->fl_owner->pid : 0,
-		     kdevname(fl->fl_file->f_inode->i_dev),
-		     fl->fl_file->f_inode->i_ino, fl->fl_start,
+		     kdevname(inode->i_dev), inode->i_ino, fl->fl_start,
 		     fl->fl_end);
-	p += sprintf(p, "%08lx %08lx %08lx %08lx %08lx\n%d:%s",
+	p += sprintf(p, "%08lx %08lx %08lx %08lx %08lx\n",
 		     (long)fl, (long)fl->fl_prevlink, (long)fl->fl_nextlink,
-		     (long)fl->fl_next, (long)fl->fl_block, id, pfx);
-	if ((wt = fl->fl_wait) != NULL) {
-		struct wait_queue *head = WAIT_QUEUE_HEAD(&fl->fl_wait);
-		while (wt != head) {
-			p += sprintf(p, " %d", wt->task->pid);
-			wt = wt->next;
-		}
-	}
-	p += sprintf(p, "\n");
+		     (long)fl->fl_next, (long)fl->fl_nextblock);
 	return (p);
 }
 
@@ -1048,8 +1082,11 @@
 	p = buf;
 	for (fl = file_lock_table, i = 1; fl != NULL; fl = fl->fl_nextlink, i++) {
 		p = lock_get_status(fl, p, i, "");
-		for (bfl = fl; bfl->fl_block != NULL; bfl = bfl->fl_block)
-			p = lock_get_status(bfl->fl_block, p, i, " ->");
+		if ((bfl = fl->fl_nextblock) == NULL)
+			continue;
+		do {
+			p = lock_get_status(bfl, p, i, " ->");
+		} while ((bfl = bfl->fl_nextblock) != fl);
 	}
 	return  (p - buf);
 }
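
With the circular block list, /proc/locks no longer appends waiter pids to
the blocker's own line; each blocked lock gets its own line beneath the
blocker, prefixed with " ->". Given the sprintf() calls above, output for
one held lock with one waiter might look like this (all values invented for
illustration; the last five columns are the fl, fl_prevlink, fl_nextlink,
fl_next and fl_nextblock pointers):

1: POSIX  ADVISORY  WRITE 742 03:01:5308 0 2147483647 00123a80 00000000 00000000 00000000 00123c40
1: -> POSIX  ADVISORY  WRITE 801 03:01:5308 0 2147483647 00123c40 00000000 00000000 00000000 00123a80

Note how the two fl_nextblock fields point at each other: with a single
waiter the circular list is just the blocker and the waiter linked both
ways.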
