On Wed, Sep 10, 2014 at 10:28:39AM -0400, Jeff Layton wrote:
> The current scheme of using the i_flock list is really difficult to
> manage. Start conversion to a new scheme to eventually replace it.
> 
> We start by adding a new i_flctx to struct inode. For now, it lives in
> parallel with i_flock list, but will eventually replace it. The idea is
> to allocate a structure to sit in that pointer and act as a locus for
> all things file locking.
> 
> We'll manage it with RCU and the i_lock, so that things like the
> break_lease() check can use RCU to check for its presence.

And the plan is to allocate it once and then never destroy it until the
inode's destroyed?

OK, seems like a sensible compromise assuming that typically only a
small number of files are ever locked.

--b.

> 
> Signed-off-by: Jeff Layton <jlay...@primarydata.com>
> ---
>  fs/inode.c         |  3 ++-
>  fs/locks.c         | 38 ++++++++++++++++++++++++++++++++++++++
>  include/linux/fs.h | 13 +++++++++++++
>  3 files changed, 53 insertions(+), 1 deletion(-)
> 
> diff --git a/fs/inode.c b/fs/inode.c
> index 26753ba7b6d6..e87b47ab69a2 100644
> --- a/fs/inode.c
> +++ b/fs/inode.c
> @@ -192,7 +192,7 @@ int inode_init_always(struct super_block *sb, struct 
> inode *inode)
>  #ifdef CONFIG_FSNOTIFY
>       inode->i_fsnotify_mask = 0;
>  #endif
> -
> +     inode->i_flctx = NULL;
>       this_cpu_inc(nr_inodes);
>  
>       return 0;
> @@ -235,6 +235,7 @@ void __destroy_inode(struct inode *inode)
>       BUG_ON(inode_has_buffers(inode));
>       security_inode_free(inode);
>       fsnotify_inode_delete(inode);
> +     locks_free_lock_context(inode->i_flctx);
>       if (!inode->i_nlink) {
>               WARN_ON(atomic_long_read(&inode->i_sb->s_remove_count) == 0);
>               atomic_long_dec(&inode->i_sb->s_remove_count);
> diff --git a/fs/locks.c b/fs/locks.c
> index 735b8d3fa78c..6aa36bf21cc9 100644
> --- a/fs/locks.c
> +++ b/fs/locks.c
> @@ -202,8 +202,43 @@ static DEFINE_HASHTABLE(blocked_hash, BLOCKED_HASH_BITS);
>   */
>  static DEFINE_SPINLOCK(blocked_lock_lock);
>  
> +static struct kmem_cache *flctx_cache __read_mostly;
>  static struct kmem_cache *filelock_cache __read_mostly;
>  
> +static struct file_lock_context *
> +locks_get_lock_context(struct inode *inode)
> +{
> +     struct file_lock_context *new = NULL;
> +
> +     if (inode->i_flctx)
> +             goto out;
> +
> +     new = kmem_cache_alloc(flctx_cache, GFP_KERNEL);
> +     if (!new)
> +             goto out;
> +
> +     spin_lock(&inode->i_lock);
> +     if (likely(!inode->i_flctx)) {
> +             spin_lock_init(&new->flc_lock);
> +             INIT_LIST_HEAD(&new->flc_flock);
> +             swap(inode->i_flctx, new);
> +     }
> +     spin_unlock(&inode->i_lock);
> +     if (new)
> +             kmem_cache_free(flctx_cache, new);
> +out:
> +     return inode->i_flctx;
> +}
> +
> +void
> +locks_free_lock_context(struct file_lock_context *ctx)
> +{
> +     if (ctx) {
> +             WARN_ON_ONCE(!list_empty(&ctx->flc_flock));
> +             kmem_cache_free(flctx_cache, ctx);
> +     }
> +}
> +
>  static void locks_init_lock_heads(struct file_lock *fl)
>  {
>       INIT_HLIST_NODE(&fl->fl_link);
> @@ -2621,6 +2656,9 @@ static int __init filelock_init(void)
>  {
>       int i;
>  
> +     flctx_cache = kmem_cache_create("file_lock_ctx",
> +                     sizeof(struct file_lock_context), 0, SLAB_PANIC, NULL);
> +
>       filelock_cache = kmem_cache_create("file_lock_cache",
>                       sizeof(struct file_lock), 0, SLAB_PANIC, NULL);
>  
> diff --git a/include/linux/fs.h b/include/linux/fs.h
> index bb9484ae1eef..090212e26d12 100644
> --- a/include/linux/fs.h
> +++ b/include/linux/fs.h
> @@ -594,6 +594,7 @@ struct inode {
>  #endif
>       const struct file_operations    *i_fop; /* former 
> ->i_op->default_file_ops */
>       struct file_lock        *i_flock;
> +     struct file_lock_context        *i_flctx;
>       struct address_space    i_data;
>  #ifdef CONFIG_QUOTA
>       struct dquot            *i_dquot[MAXQUOTAS];
> @@ -933,6 +934,11 @@ struct file_lock {
>       } fl_u;
>  };
>  
> +struct file_lock_context {
> +     spinlock_t              flc_lock;
> +     struct list_head        flc_flock;
> +};
> +
>  /* The following constant reflects the upper bound of the file/locking space 
> */
>  #ifndef OFFSET_MAX
>  #define INT_LIMIT(x) (~((x)1 << (sizeof(x)*8 - 1)))
> @@ -959,6 +965,7 @@ extern int fcntl_setlease(unsigned int fd, struct file 
> *filp, long arg);
>  extern int fcntl_getlease(struct file *filp);
>  
>  /* fs/locks.c */
> +void locks_free_lock_context(struct file_lock_context *ctx);
>  void locks_free_lock(struct file_lock *fl);
>  extern void locks_init_lock(struct file_lock *);
>  extern struct file_lock * locks_alloc_lock(void);
> @@ -1016,6 +1023,12 @@ static inline int fcntl_getlease(struct file *filp)
>       return 0;
>  }
>  
> +static inline void
> +locks_free_lock_context(struct file_lock_context *ctx)
> +{
> +     return;
> +}
> +
>  static inline void locks_init_lock(struct file_lock *fl)
>  {
>       return;
> -- 
> 1.9.3
> 
--
To unsubscribe from this list: send the line "unsubscribe linux-kernel" in
the body of a message to majord...@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html
Please read the FAQ at  http://www.tux.org/lkml/

Reply via email to