The function get_volume_page_protected may place a request on
a queue for another thread to process asynchronously. When this
happens, the volume should not read the request from the original
thread. This cannot currently cause problems, due to the way
request processing is handled, but it is not safe in general.

Reviewed-by: Ken Raeburn <raeb...@redhat.com>
Signed-off-by: Matthew Sakai <msa...@redhat.com>
---
 drivers/md/dm-vdo/indexer/volume.c | 24 +++++++++++++-----------
 1 file changed, 13 insertions(+), 11 deletions(-)

diff --git a/drivers/md/dm-vdo/indexer/volume.c b/drivers/md/dm-vdo/indexer/volume.c
index 655453bb276b..425b3a74f4db 100644
--- a/drivers/md/dm-vdo/indexer/volume.c
+++ b/drivers/md/dm-vdo/indexer/volume.c
@@ -754,10 +754,11 @@ static int get_volume_page_protected(struct volume *volume, struct uds_request *
                                     u32 physical_page, struct cached_page **page_ptr)
 {
        struct cached_page *page;
+       unsigned int zone_number = request->zone_number;
 
        get_page_from_cache(&volume->page_cache, physical_page, &page);
        if (page != NULL) {
-               if (request->zone_number == 0) {
+               if (zone_number == 0) {
                        /* Only one zone is allowed to update the LRU. */
                        make_page_most_recent(&volume->page_cache, page);
                }
@@ -767,7 +768,7 @@ static int get_volume_page_protected(struct volume *volume, struct uds_request *
        }
 
        /* Prepare to enqueue a read for the page. */
-       end_pending_search(&volume->page_cache, request->zone_number);
+       end_pending_search(&volume->page_cache, zone_number);
        mutex_lock(&volume->read_threads_mutex);
 
        /*
@@ -787,8 +788,7 @@ static int get_volume_page_protected(struct volume *volume, struct uds_request *
                 * the order does not matter for correctness as it does below.
                 */
                mutex_unlock(&volume->read_threads_mutex);
-               begin_pending_search(&volume->page_cache, physical_page,
-                                    request->zone_number);
+               begin_pending_search(&volume->page_cache, physical_page, zone_number);
                return UDS_QUEUED;
        }
 
@@ -797,7 +797,7 @@ static int get_volume_page_protected(struct volume *volume, struct uds_request *
         * "search pending" state in careful order so no other thread can mess with the data before
         * the caller gets to look at it.
         */
-       begin_pending_search(&volume->page_cache, physical_page, request->zone_number);
+       begin_pending_search(&volume->page_cache, physical_page, zone_number);
        mutex_unlock(&volume->read_threads_mutex);
        *page_ptr = page;
        return UDS_SUCCESS;
@@ -849,6 +849,7 @@ static int search_cached_index_page(struct volume *volume, struct uds_request *r
 {
        int result;
        struct cached_page *page = NULL;
+       unsigned int zone_number = request->zone_number;
        u32 physical_page = map_to_physical_page(volume->geometry, chapter,
                                                 index_page_number);
 
@@ -858,18 +859,18 @@ static int search_cached_index_page(struct volume *volume, struct uds_request *r
         * invalidation by the reader thread, before the reader thread has noticed that the
         * invalidate_counter has been incremented.
         */
-       begin_pending_search(&volume->page_cache, physical_page, request->zone_number);
+       begin_pending_search(&volume->page_cache, physical_page, zone_number);
 
        result = get_volume_page_protected(volume, request, physical_page, &page);
        if (result != UDS_SUCCESS) {
-               end_pending_search(&volume->page_cache, request->zone_number);
+               end_pending_search(&volume->page_cache, zone_number);
                return result;
        }
 
        result = uds_search_chapter_index_page(&page->index_page, volume->geometry,
                                               &request->record_name,
                                               record_page_number);
-       end_pending_search(&volume->page_cache, request->zone_number);
+       end_pending_search(&volume->page_cache, zone_number);
        return result;
 }
 
@@ -882,6 +883,7 @@ int uds_search_cached_record_page(struct volume *volume, struct uds_request *req
 {
        struct cached_page *record_page;
        struct index_geometry *geometry = volume->geometry;
+       unsigned int zone_number = request->zone_number;
        int result;
        u32 physical_page, page_number;
 
@@ -905,11 +907,11 @@ int uds_search_cached_record_page(struct volume *volume, struct uds_request *req
         * invalidation by the reader thread, before the reader thread has noticed that the
         * invalidate_counter has been incremented.
         */
-       begin_pending_search(&volume->page_cache, physical_page, request->zone_number);
+       begin_pending_search(&volume->page_cache, physical_page, zone_number);
 
        result = get_volume_page_protected(volume, request, physical_page, &record_page);
        if (result != UDS_SUCCESS) {
-               end_pending_search(&volume->page_cache, request->zone_number);
+               end_pending_search(&volume->page_cache, zone_number);
                return result;
        }
 
@@ -917,7 +919,7 @@ int uds_search_cached_record_page(struct volume *volume, struct uds_request *req
                               &request->record_name, geometry, &request->old_metadata))
                *found = true;
 
-       end_pending_search(&volume->page_cache, request->zone_number);
+       end_pending_search(&volume->page_cache, zone_number);
        return UDS_SUCCESS;
 }
 
-- 
2.48.1


Reply via email to