From: Jinshan Xiong <jinshan.xi...@intel.com>

In vvp_io_read_lock(), the code used to decide whether to take a read
lock by checking lli_has_smd. Accessing lli_has_smd is racy when an
empty file is turned into raid0; as a result, read requests may be
issued without the corresponding lock.

Signed-off-by: Jinshan Xiong <jinshan.xi...@intel.com>
Reviewed-on: http://review.whamcloud.com/12139
Intel-bug-id: https://jira.hpdd.intel.com/browse/LU-5062
Reviewed-by: Bobi Jam <bobi...@gmail.com>
Signed-off-by: Oleg Drokin <oleg.dro...@intel.com>
---
 drivers/staging/lustre/lustre/include/lclient.h    |  1 +
 drivers/staging/lustre/lustre/lclient/lcommon_cl.c |  6 ++++++
 drivers/staging/lustre/lustre/llite/vvp_io.c       | 15 +++++----------
 drivers/staging/lustre/lustre/llite/vvp_lock.c     |  1 +
 4 files changed, 13 insertions(+), 10 deletions(-)

diff --git a/drivers/staging/lustre/lustre/include/lclient.h 
b/drivers/staging/lustre/lustre/include/lclient.h
index 316500c..c5c3a8d 100644
--- a/drivers/staging/lustre/lustre/include/lclient.h
+++ b/drivers/staging/lustre/lustre/include/lclient.h
@@ -325,6 +325,7 @@ void ccc_lock_fini(const struct lu_env *env, struct 
cl_lock_slice *slice);
 int ccc_lock_enqueue(const struct lu_env *env,
                     const struct cl_lock_slice *slice,
                     struct cl_io *io, __u32 enqflags);
+int ccc_lock_use(const struct lu_env *env, const struct cl_lock_slice *slice);
 int ccc_lock_unuse(const struct lu_env *env, const struct cl_lock_slice 
*slice);
 int ccc_lock_wait(const struct lu_env *env, const struct cl_lock_slice *slice);
 int ccc_lock_fits_into(const struct lu_env *env,
diff --git a/drivers/staging/lustre/lustre/lclient/lcommon_cl.c 
b/drivers/staging/lustre/lustre/lclient/lcommon_cl.c
index 24d26ab..23095bb 100644
--- a/drivers/staging/lustre/lustre/lclient/lcommon_cl.c
+++ b/drivers/staging/lustre/lustre/lclient/lcommon_cl.c
@@ -586,6 +586,12 @@ int ccc_lock_enqueue(const struct lu_env *env,
        return 0;
 }
 
+int ccc_lock_use(const struct lu_env *env, const struct cl_lock_slice *slice)
+{
+       CLOBINVRNT(env, slice->cls_obj, ccc_object_invariant(slice->cls_obj));
+       return 0;
+}
+
 int ccc_lock_unuse(const struct lu_env *env, const struct cl_lock_slice *slice)
 {
        CLOBINVRNT(env, slice->cls_obj, ccc_object_invariant(slice->cls_obj));
diff --git a/drivers/staging/lustre/lustre/llite/vvp_io.c 
b/drivers/staging/lustre/lustre/llite/vvp_io.c
index 930f601..ce54b76 100644
--- a/drivers/staging/lustre/lustre/llite/vvp_io.c
+++ b/drivers/staging/lustre/lustre/llite/vvp_io.c
@@ -307,18 +307,13 @@ static int vvp_io_rw_lock(const struct lu_env *env, 
struct cl_io *io,
 static int vvp_io_read_lock(const struct lu_env *env,
                            const struct cl_io_slice *ios)
 {
-       struct cl_io     *io  = ios->cis_io;
-       struct ll_inode_info *lli = ll_i2info(ccc_object_inode(io->ci_obj));
+       struct cl_io     *io = ios->cis_io;
+       struct cl_io_rw_common *rd = &io->u.ci_rd.rd;
        int result;
 
-       /* XXX: Layer violation, we shouldn't see lsm at llite level. */
-       if (lli->lli_has_smd) /* lsm-less file doesn't need to lock */
-               result = vvp_io_rw_lock(env, io, CLM_READ,
-                                       io->u.ci_rd.rd.crw_pos,
-                                       io->u.ci_rd.rd.crw_pos +
-                                       io->u.ci_rd.rd.crw_count - 1);
-       else
-               result = 0;
+       result = vvp_io_rw_lock(env, io, CLM_READ, rd->crw_pos,
+                               rd->crw_pos + rd->crw_count - 1);
+
        return result;
 }
 
diff --git a/drivers/staging/lustre/lustre/llite/vvp_lock.c 
b/drivers/staging/lustre/lustre/llite/vvp_lock.c
index 372633e..f354e82 100644
--- a/drivers/staging/lustre/lustre/llite/vvp_lock.c
+++ b/drivers/staging/lustre/lustre/llite/vvp_lock.c
@@ -71,6 +71,7 @@ static const struct cl_lock_operations vvp_lock_ops = {
        .clo_fini      = ccc_lock_fini,
        .clo_enqueue   = ccc_lock_enqueue,
        .clo_wait      = ccc_lock_wait,
+       .clo_use       = ccc_lock_use,
        .clo_unuse     = ccc_lock_unuse,
        .clo_fits_into = ccc_lock_fits_into,
        .clo_state     = ccc_lock_state,
-- 
2.1.0

--
To unsubscribe from this list: send the line "unsubscribe linux-kernel" in
the body of a message to majord...@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html
Please read the FAQ at  http://www.tux.org/lkml/

Reply via email to