Trond Myklebust wrote:
> Doh. You're quite right on both counts. I made a small modification
> to your patch for the rpc_new_task() problem (you forgot to release
> the RPC task if it's not used).
> 
> Linus, please apply for 2.4.1...

Thanks guys! I'm testing it now, and so far, so good. I've merged
Andrew's and Trond's patches into one:

*******************************************
--- linux-2.4.0/fs/nfs/flushd.c.orig       Wed Jan 10 07:18:32 2001
+++ linux-2.4.0/fs/nfs/flushd.c    Wed Jan 10 07:18:38 2001
@@ -55,7 +55,7 @@
 /*
  * Spinlock
  */
-spinlock_t nfs_flushd_lock = SPIN_LOCK_UNLOCKED;
+static spinlock_t nfs_flushd_lock = SPIN_LOCK_UNLOCKED;

 /*
  * Local function declarations.
@@ -71,18 +71,17 @@
        int                     status = 0;

        dprintk("NFS: writecache_init\n");
+
+       /* Create the RPC task */
+       if (!(task = rpc_new_task(server->client, NULL, RPC_TASK_ASYNC)))
+               return -ENOMEM;
+
        spin_lock(&nfs_flushd_lock);
        cache = server->rw_requests;

        if (cache->task)
                goto out_unlock;

-       /* Create the RPC task */
-       status = -ENOMEM;
-       task = rpc_new_task(server->client, NULL, RPC_TASK_ASYNC);
-       if (!task)
-               goto out_unlock;
-
        task->tk_calldata = server;

        cache->task = task;
@@ -99,6 +98,7 @@
        return 0;
  out_unlock:
        spin_unlock(&nfs_flushd_lock);
+       rpc_release_task(task);
        return status;
 }

@@ -195,7 +195,9 @@
        if (*q) {
                *q = inode->u.nfs_i.hash_next;
                NFS_FLAGS(inode) &= ~NFS_INO_FLUSH;
+               spin_unlock(&nfs_flushd_lock);
                iput(inode);
+               return;
        }
  out:
        spin_unlock(&nfs_flushd_lock);
*******************************************
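
In case it helps review, here is roughly the flow the first two hunks
produce in the init routine (a sketch pieced together from the patch
context above, not the verbatim file; declarations and the task
start-up are elided). The idea is to allocate the RPC task before
taking nfs_flushd_lock, presumably so the possibly-blocking allocation
never happens with the spinlock held, and to release the task on the
bail-out path so it isn't leaked when a flush task already exists for
the server. The last hunk likewise drops nfs_flushd_lock before
calling iput(), which can block, and returns early so the lock isn't
unlocked twice.

	/* Sketch only, not verbatim source */
	task = rpc_new_task(server->client, NULL, RPC_TASK_ASYNC);
	if (!task)
		return -ENOMEM;		/* nothing held yet, just bail */

	spin_lock(&nfs_flushd_lock);
	cache = server->rw_requests;
	if (cache->task)		/* a flush task is already running */
		goto out_unlock;

	task->tk_calldata = server;
	cache->task = task;
	/* ... start the task ... */
	spin_unlock(&nfs_flushd_lock);
	return 0;

 out_unlock:
	spin_unlock(&nfs_flushd_lock);
	rpc_release_task(task);		/* free the task we never used */
	return status;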


Brian O'Keefe
Spinnaker Networks, Inc.
[EMAIL PROTECTED]