If CXL regions do not fully cover a Soft Reserved span, HMEM takes ownership. Tear down overlapping CXL regions before allowing HMEM to register and online the memory.
Add cxl_region_teardown() to walk the CXL regions overlapping a span
and unregister them via devm_release_action() and unregister_region().
Force the region state back to CXL_CONFIG_ACTIVE before unregistering
to prevent the teardown path from resetting decoders that HMEM still
relies on to create its DAX device and online the memory.

Co-developed-by: Alison Schofield <[email protected]>
Signed-off-by: Alison Schofield <[email protected]>
Signed-off-by: Smita Koralahalli <[email protected]>
---
 drivers/cxl/core/region.c | 38 ++++++++++++++++++++++++++++++++++++++
 drivers/cxl/cxl.h         |  5 +++++
 drivers/dax/hmem/hmem.c   |  4 +++-
 3 files changed, 46 insertions(+), 1 deletion(-)

diff --git a/drivers/cxl/core/region.c b/drivers/cxl/core/region.c
index 38e7ec6a087b..266b24028df0 100644
--- a/drivers/cxl/core/region.c
+++ b/drivers/cxl/core/region.c
@@ -3784,6 +3784,44 @@ struct cxl_range_ctx {
 	bool found;
 };
 
+static int cxl_region_teardown_cb(struct device *dev, void *data)
+{
+	struct cxl_range_ctx *ctx = data;
+	struct cxl_root_decoder *cxlrd;
+	struct cxl_region_params *p;
+	struct cxl_region *cxlr;
+	struct cxl_port *port;
+
+	cxlr = cxlr_overlapping_range(dev, ctx->start, ctx->end);
+	if (!cxlr)
+		return 0;
+
+	cxlrd = to_cxl_root_decoder(cxlr->dev.parent);
+	port = cxlrd_to_port(cxlrd);
+	p = &cxlr->params;
+
+	/* Force the region state back to CXL_CONFIG_ACTIVE so that
+	 * unregister_region() does not run the full decoder reset path
+	 * which would invalidate the decoder programming that HMEM
+	 * relies on to create its DAX device and online the underlying
+	 * memory.
+	 */
+	scoped_guard(rwsem_write, &cxl_rwsem.region)
+		p->state = min(p->state, CXL_CONFIG_ACTIVE);
+
+	devm_release_action(port->uport_dev, unregister_region, cxlr);
+
+	return 0;
+}
+
+void cxl_region_teardown(resource_size_t start, resource_size_t end)
+{
+	struct cxl_range_ctx ctx = { .start = start, .end = end };
+
+	bus_for_each_dev(&cxl_bus_type, NULL, &ctx, cxl_region_teardown_cb);
+}
+EXPORT_SYMBOL_GPL(cxl_region_teardown);
+
 static void cxl_region_enable_dax(struct cxl_region *cxlr)
 {
 	struct cxl_region_params *p = &cxlr->params;
diff --git a/drivers/cxl/cxl.h b/drivers/cxl/cxl.h
index 414ddf6c35d7..a215a88ef59c 100644
--- a/drivers/cxl/cxl.h
+++ b/drivers/cxl/cxl.h
@@ -880,6 +880,7 @@ struct cxl_dax_region *to_cxl_dax_region(struct device *dev);
 u64 cxl_port_get_spa_cache_alias(struct cxl_port *endpoint, u64 spa);
 bool cxl_regions_fully_map(resource_size_t start, resource_size_t end);
 void cxl_register_dax(resource_size_t start, resource_size_t end);
+void cxl_region_teardown(resource_size_t start, resource_size_t end);
 #else
 static inline bool is_cxl_pmem_region(struct device *dev)
 {
@@ -911,6 +912,10 @@ static inline void cxl_register_dax(resource_size_t start, resource_size_t end)
 {
 }
 
+static inline void cxl_region_teardown(resource_size_t start,
+				       resource_size_t end)
+{
+}
 #endif
 
 void cxl_endpoint_parse_cdat(struct cxl_port *port);
diff --git a/drivers/dax/hmem/hmem.c b/drivers/dax/hmem/hmem.c
index b9312e0f2e62..7d874ee169ac 100644
--- a/drivers/dax/hmem/hmem.c
+++ b/drivers/dax/hmem/hmem.c
@@ -158,8 +158,10 @@ static int handle_deferred_cxl(struct device *host, int target_nid,
 	if (cxl_regions_fully_map(res->start, res->end)) {
 		dax_cxl_mode = DAX_CXL_MODE_DROP;
 		cxl_register_dax(res->start, res->end);
-	} else
+	} else {
 		dax_cxl_mode = DAX_CXL_MODE_REGISTER;
+		cxl_region_teardown(res->start, res->end);
+	}
 
 	hmem_register_device(host, target_nid, res);
 }
-- 
2.17.1
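
A note for reviewers: cxlr_overlapping_range() and struct cxl_range_ctx
are provided earlier in this series, not by this patch. The sketch below
is an assumption of that helper's contract, inferred only from its single
call site in cxl_region_teardown_cb() above: return the region when @dev
is a CXL region device whose host physical address range intersects
[start, end], otherwise NULL. The series' actual implementation may
differ.

/*
 * Hypothetical sketch, not part of this patch: the contract of
 * cxlr_overlapping_range() as inferred from cxl_region_teardown_cb().
 */
static struct cxl_region *cxlr_overlapping_range(struct device *dev,
						 resource_size_t start,
						 resource_size_t end)
{
	struct cxl_region *cxlr;
	struct resource *res;

	/* bus_for_each_dev() visits every CXL bus device; filter regions */
	if (!is_cxl_region(dev))
		return NULL;

	cxlr = to_cxl_region(dev);
	res = cxlr->params.res;

	/* No address space committed yet, nothing to tear down */
	if (!res)
		return NULL;

	/* Overlap test against the region's host physical address range */
	if (res->start > end || res->end < start)
		return NULL;

	return cxlr;
}

Filtering inside the helper keeps cxl_region_teardown_cb() trivially
safe to run against every device that bus_for_each_dev() visits.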

