@@ -245,6 +245,9 @@ static void cxl_region_decode_reset(struct cxl_region *cxlr, int count)
245 245 	struct cxl_region_params *p = &cxlr->params;
246 246 	int i;
247 247 
248 +	if (test_bit(CXL_REGION_F_LOCK, &cxlr->flags))
249 +		return;
250 +
248 251 	/*
249 252 	 * Before region teardown attempt to flush, evict any data cached for
250 253 	 * this region, or scream loudly about missing arch / platform support
@@ -419,6 +422,9 @@ static ssize_t commit_store(struct device *dev, struct device_attribute *attr,
419 422 		return len;
420 423 	}
421 424 
425 +	if (test_bit(CXL_REGION_F_LOCK, &cxlr->flags))
426 +		return -EPERM;
427 +
422 428 	rc = queue_reset(cxlr);
423 429 	if (rc)
424 430 		return rc;
@@ -838,16 +844,16 @@ static int match_free_decoder(struct device *dev, const void *data)
838 844 	return 1;
839 845 }
840 846 
841 -static bool region_res_match_cxl_range(const struct cxl_region_params *p,
842 -				       const struct range *range)
847 +static bool spa_maps_hpa(const struct cxl_region_params *p,
848 +			 const struct range *range)
843 849 {
844 850 	if (!p->res)
845 851 		return false;
846 852 
847 853 	/*
848 -	 * If an extended linear cache region then the CXL range is assumed
849 -	 * to be fronted by the DRAM range in current known implementation.
850 -	 * This assumption will be made until a variant implementation exists.
854 +	 * The extended linear cache region is constructed by a 1:1 ratio
855 +	 * where the SPA maps equal amounts of DRAM and CXL HPA capacity with
856 +	 * CXL decoders at the high end of the SPA range.
851 857 	 */
852 858 	return p->res->start + p->cache_size == range->start &&
853 859 	       p->res->end == range->end;
@@ -865,7 +871,7 @@ static int match_auto_decoder(struct device *dev, const void *data)
865 871 	cxld = to_cxl_decoder(dev);
866 872 	r = &cxld->hpa_range;
867 873 
868 -	if (region_res_match_cxl_range(p, r))
874 +	if (spa_maps_hpa(p, r))
869 875 		return 1;
870 876 
871 877 	return 0;
@@ -1059,6 +1065,16 @@ static int cxl_rr_assign_decoder(struct cxl_port *port, struct cxl_region *cxlr,
1059 1065 	return 0;
1060 1066 }
1061 1067 
1068 +static void cxl_region_set_lock(struct cxl_region *cxlr,
1069 +				struct cxl_decoder *cxld)
1070 +{
1071 +	if (!test_bit(CXL_DECODER_F_LOCK, &cxld->flags))
1072 +		return;
1073 +
1074 +	set_bit(CXL_REGION_F_LOCK, &cxlr->flags);
1075 +	clear_bit(CXL_REGION_F_NEEDS_RESET, &cxlr->flags);
1076 +}
1077 +
1062 1078 /**
1063 1079  * cxl_port_attach_region() - track a region's interest in a port by endpoint
1064 1080  * @port: port to add a new region reference 'struct cxl_region_ref'
@@ -1170,6 +1186,8 @@ static int cxl_port_attach_region(struct cxl_port *port,
1170 1186 		}
1171 1187 	}
1172 1188 
1189 +	cxl_region_set_lock(cxlr, cxld);
1190 +
1173 1191 	rc = cxl_rr_ep_add(cxl_rr, cxled);
1174 1192 	if (rc) {
1175 1193 		dev_dbg(&cxlr->dev,
@@ -1465,7 +1483,7 @@ static int cxl_port_setup_targets(struct cxl_port *port,
1465 1483 	if (test_bit(CXL_REGION_F_AUTO, &cxlr->flags)) {
1466 1484 		if (cxld->interleave_ways != iw ||
1467 1485 		    (iw > 1 && cxld->interleave_granularity != ig) ||
1468 -		    !region_res_match_cxl_range(p, &cxld->hpa_range) ||
1486 +		    !spa_maps_hpa(p, &cxld->hpa_range) ||
1469 1487 		    ((cxld->flags & CXL_DECODER_F_ENABLE) == 0)) {
1470 1488 			dev_err(&cxlr->dev,
1471 1489 				"%s:%s %s expected iw: %d ig: %d %pr\n",
@@ -2439,6 +2457,7 @@ static struct cxl_region *cxl_region_alloc(struct cxl_root_decoder *cxlrd, int i
2439 2457 	dev->bus = &cxl_bus_type;
2440 2458 	dev->type = &cxl_region_type;
2441 2459 	cxlr->id = id;
2460 +	cxl_region_set_lock(cxlr, &cxlrd->cxlsd.cxld);
2442 2461 
2443 2462 	return cxlr;
2444 2463 }
@@ -3398,7 +3417,7 @@ static int match_region_by_range(struct device *dev, const void *data)
3398 3417 	p = &cxlr->params;
3399 3418 
3400 3419 	guard(rwsem_read)(&cxl_rwsem.region);
3401 -	return region_res_match_cxl_range(p, r);
3420 +	return spa_maps_hpa(p, r);
3402 3421 }
3403 3422 
3404 3423 static int cxl_extended_linear_cache_resize(struct cxl_region *cxlr,
0 commit comments