@@ -246,16 +246,16 @@ static inline void assert_on_cache_thread(struct vdo_page_cache *cache,
246246{
247247 thread_id_t thread_id = vdo_get_callback_thread_id ();
248248
249- ASSERT_LOG_ONLY ((thread_id == cache -> zone -> thread_id ),
250- "%s() must only be called on cache thread %d, not thread %d" ,
251- function_name , cache -> zone -> thread_id , thread_id );
249+ VDO_ASSERT_LOG_ONLY ((thread_id == cache -> zone -> thread_id ),
250+ "%s() must only be called on cache thread %d, not thread %d" ,
251+ function_name , cache -> zone -> thread_id , thread_id );
252252}
253253
254254/** assert_io_allowed() - Assert that a page cache may issue I/O. */
255255static inline void assert_io_allowed (struct vdo_page_cache * cache )
256256{
257- ASSERT_LOG_ONLY (!vdo_is_state_quiescent (& cache -> zone -> state ),
258- "VDO page cache may issue I/O" );
257+ VDO_ASSERT_LOG_ONLY (!vdo_is_state_quiescent (& cache -> zone -> state ),
258+ "VDO page cache may issue I/O" );
259259}
260260
261261/** report_cache_pressure() - Log and, if enabled, report cache pressure. */
@@ -287,9 +287,9 @@ static const char * __must_check get_page_state_name(enum vdo_page_buffer_state
287287
288288 BUILD_BUG_ON (ARRAY_SIZE (state_names ) != PAGE_STATE_COUNT );
289289
290- result = ASSERT (state < ARRAY_SIZE (state_names ),
291- "Unknown page_state value %d" , state );
292- if (result != UDS_SUCCESS )
290+ result = VDO_ASSERT (state < ARRAY_SIZE (state_names ),
291+ "Unknown page_state value %d" , state );
292+ if (result != VDO_SUCCESS )
293293 return "[UNKNOWN PAGE STATE]" ;
294294
295295 return state_names [state ];
@@ -378,8 +378,8 @@ static int __must_check set_info_pbn(struct page_info *info, physical_block_numb
378378 struct vdo_page_cache * cache = info -> cache ;
379379
380380 /* Either the new or the old page number must be NO_PAGE. */
381- int result = ASSERT ((pbn == NO_PAGE ) || (info -> pbn == NO_PAGE ),
382- "Must free a page before reusing it." );
381+ int result = VDO_ASSERT ((pbn == NO_PAGE ) || (info -> pbn == NO_PAGE ),
382+ "Must free a page before reusing it." );
383383 if (result != VDO_SUCCESS )
384384 return result ;
385385
@@ -401,13 +401,13 @@ static int reset_page_info(struct page_info *info)
401401{
402402 int result ;
403403
404- result = ASSERT (info -> busy == 0 , "VDO Page must not be busy" );
405- if (result != UDS_SUCCESS )
404+ result = VDO_ASSERT (info -> busy == 0 , "VDO Page must not be busy" );
405+ if (result != VDO_SUCCESS )
406406 return result ;
407407
408- result = ASSERT (!vdo_waitq_has_waiters (& info -> waiting ),
409- "VDO Page must not have waiters" );
410- if (result != UDS_SUCCESS )
408+ result = VDO_ASSERT (!vdo_waitq_has_waiters (& info -> waiting ),
409+ "VDO Page must not have waiters" );
410+ if (result != VDO_SUCCESS )
411411 return result ;
412412
413413 result = set_info_pbn (info , NO_PAGE );
@@ -592,29 +592,29 @@ static int __must_check validate_completed_page(struct vdo_page_completion *comp
592592{
593593 int result ;
594594
595- result = ASSERT (completion -> ready , "VDO Page completion not ready" );
596- if (result != UDS_SUCCESS )
595+ result = VDO_ASSERT (completion -> ready , "VDO Page completion not ready" );
596+ if (result != VDO_SUCCESS )
597597 return result ;
598598
599- result = ASSERT (completion -> info != NULL ,
600- "VDO Page Completion must be complete" );
601- if (result != UDS_SUCCESS )
599+ result = VDO_ASSERT (completion -> info != NULL ,
600+ "VDO Page Completion must be complete" );
601+ if (result != VDO_SUCCESS )
602602 return result ;
603603
604- result = ASSERT (completion -> info -> pbn == completion -> pbn ,
605- "VDO Page Completion pbn must be consistent" );
606- if (result != UDS_SUCCESS )
604+ result = VDO_ASSERT (completion -> info -> pbn == completion -> pbn ,
605+ "VDO Page Completion pbn must be consistent" );
606+ if (result != VDO_SUCCESS )
607607 return result ;
608608
609- result = ASSERT (is_valid (completion -> info ),
610- "VDO Page Completion page must be valid" );
611- if (result != UDS_SUCCESS )
609+ result = VDO_ASSERT (is_valid (completion -> info ),
610+ "VDO Page Completion page must be valid" );
611+ if (result != VDO_SUCCESS )
612612 return result ;
613613
614614 if (writable ) {
615- result = ASSERT (completion -> writable ,
616- "VDO Page Completion must be writable" );
617- if (result != UDS_SUCCESS )
615+ result = VDO_ASSERT (completion -> writable ,
616+ "VDO Page Completion must be writable" );
617+ if (result != VDO_SUCCESS )
618618 return result ;
619619 }
620620
@@ -776,7 +776,7 @@ static int __must_check launch_page_load(struct page_info *info,
776776 if (result != VDO_SUCCESS )
777777 return result ;
778778
779- result = ASSERT ((info -> busy == 0 ), "Page is not busy before loading." );
779+ result = VDO_ASSERT ((info -> busy == 0 ), "Page is not busy before loading." );
780780 if (result != VDO_SUCCESS )
781781 return result ;
782782
@@ -949,8 +949,8 @@ static void discard_a_page(struct vdo_page_cache *cache)
949949 return ;
950950 }
951951
952- ASSERT_LOG_ONLY (!is_in_flight (info ),
953- "page selected for discard is not in flight" );
952+ VDO_ASSERT_LOG_ONLY (!is_in_flight (info ),
953+ "page selected for discard is not in flight" );
954954
955955 cache -> discard_count ++ ;
956956 info -> write_status = WRITE_STATUS_DISCARD ;
@@ -1153,8 +1153,8 @@ void vdo_release_page_completion(struct vdo_completion *completion)
11531153 discard_info = page_completion -> info ;
11541154 }
11551155
1156- ASSERT_LOG_ONLY ((page_completion -> waiter .next_waiter == NULL ),
1157- "Page being released after leaving all queues" );
1156+ VDO_ASSERT_LOG_ONLY ((page_completion -> waiter .next_waiter == NULL ),
1157+ "Page being released after leaving all queues" );
11581158
11591159 page_completion -> info = NULL ;
11601160 cache = page_completion -> cache ;
@@ -1217,8 +1217,8 @@ void vdo_get_page(struct vdo_page_completion *page_completion,
12171217 struct page_info * info ;
12181218
12191219 assert_on_cache_thread (cache , __func__ );
1220- ASSERT_LOG_ONLY ((page_completion -> waiter .next_waiter == NULL ),
1221- "New page completion was not already on a wait queue" );
1220+ VDO_ASSERT_LOG_ONLY ((page_completion -> waiter .next_waiter == NULL ),
1221+ "New page completion was not already on a wait queue" );
12221222
12231223 * page_completion = (struct vdo_page_completion ) {
12241224 .pbn = pbn ,
@@ -1265,7 +1265,7 @@ void vdo_get_page(struct vdo_page_completion *page_completion,
12651265 }
12661266
12671267 /* Something horrible has gone wrong. */
1268- ASSERT_LOG_ONLY (false, "Info found in a usable state." );
1268+ VDO_ASSERT_LOG_ONLY (false, "Info found in a usable state." );
12691269 }
12701270
12711271 /* The page must be fetched. */
@@ -1334,7 +1334,7 @@ int vdo_invalidate_page_cache(struct vdo_page_cache *cache)
13341334
13351335 /* Make sure we don't throw away any dirty pages. */
13361336 for (info = cache -> infos ; info < cache -> infos + cache -> page_count ; info ++ ) {
1337- int result = ASSERT (!is_dirty (info ), "cache must have no dirty pages" );
1337+ int result = VDO_ASSERT (!is_dirty (info ), "cache must have no dirty pages" );
13381338
13391339 if (result != VDO_SUCCESS )
13401340 return result ;
@@ -1440,10 +1440,10 @@ static bool __must_check is_not_older(struct block_map_zone *zone, u8 a, u8 b)
14401440{
14411441 int result ;
14421442
1443- result = ASSERT ((in_cyclic_range (zone -> oldest_generation , a , zone -> generation , 1 << 8 ) &&
1444- in_cyclic_range (zone -> oldest_generation , b , zone -> generation , 1 << 8 )),
1445- "generation(s) %u, %u are out of range [%u, %u]" ,
1446- a , b , zone -> oldest_generation , zone -> generation );
1443+ result = VDO_ASSERT ((in_cyclic_range (zone -> oldest_generation , a , zone -> generation , 1 << 8 ) &&
1444+ in_cyclic_range (zone -> oldest_generation , b , zone -> generation , 1 << 8 )),
1445+ "generation(s) %u, %u are out of range [%u, %u]" ,
1446+ a , b , zone -> oldest_generation , zone -> generation );
14471447 if (result != VDO_SUCCESS ) {
14481448 enter_zone_read_only_mode (zone , result );
14491449 return true;
@@ -1456,8 +1456,8 @@ static void release_generation(struct block_map_zone *zone, u8 generation)
14561456{
14571457 int result ;
14581458
1459- result = ASSERT ((zone -> dirty_page_counts [generation ] > 0 ),
1460- "dirty page count underflow for generation %u" , generation );
1459+ result = VDO_ASSERT ((zone -> dirty_page_counts [generation ] > 0 ),
1460+ "dirty page count underflow for generation %u" , generation );
14611461 if (result != VDO_SUCCESS ) {
14621462 enter_zone_read_only_mode (zone , result );
14631463 return ;
@@ -1482,8 +1482,8 @@ static void set_generation(struct block_map_zone *zone, struct tree_page *page,
14821482
14831483 page -> generation = new_generation ;
14841484 new_count = ++ zone -> dirty_page_counts [new_generation ];
1485- result = ASSERT ((new_count != 0 ), "dirty page count overflow for generation %u" ,
1486- new_generation );
1485+ result = VDO_ASSERT ((new_count != 0 ), "dirty page count overflow for generation %u" ,
1486+ new_generation );
14871487 if (result != VDO_SUCCESS ) {
14881488 enter_zone_read_only_mode (zone , result );
14891489 return ;
@@ -1698,15 +1698,15 @@ static void release_page_lock(struct data_vio *data_vio, char *what)
16981698 struct tree_lock * lock_holder ;
16991699 struct tree_lock * lock = & data_vio -> tree_lock ;
17001700
1701- ASSERT_LOG_ONLY (lock -> locked ,
1702- "release of unlocked block map page %s for key %llu in tree %u" ,
1703- what , (unsigned long long ) lock -> key , lock -> root_index );
1701+ VDO_ASSERT_LOG_ONLY (lock -> locked ,
1702+ "release of unlocked block map page %s for key %llu in tree %u" ,
1703+ what , (unsigned long long ) lock -> key , lock -> root_index );
17041704
17051705 zone = data_vio -> logical .zone -> block_map_zone ;
17061706 lock_holder = vdo_int_map_remove (zone -> loading_pages , lock -> key );
1707- ASSERT_LOG_ONLY ((lock_holder == lock ),
1708- "block map page %s mismatch for key %llu in tree %u" ,
1709- what , (unsigned long long ) lock -> key , lock -> root_index );
1707+ VDO_ASSERT_LOG_ONLY ((lock_holder == lock ),
1708+ "block map page %s mismatch for key %llu in tree %u" ,
1709+ what , (unsigned long long ) lock -> key , lock -> root_index );
17101710 lock -> locked = false;
17111711}
17121712
@@ -2008,8 +2008,8 @@ static void write_expired_elements(struct block_map_zone *zone)
20082008
20092009 list_del_init (& page -> entry );
20102010
2011- result = ASSERT (!vdo_waiter_is_waiting (& page -> waiter ),
2012- "Newly expired page not already waiting to write" );
2011+ result = VDO_ASSERT (!vdo_waiter_is_waiting (& page -> waiter ),
2012+ "Newly expired page not already waiting to write" );
20132013 if (result != VDO_SUCCESS ) {
20142014 enter_zone_read_only_mode (zone , result );
20152015 continue ;
@@ -2867,8 +2867,8 @@ int vdo_decode_block_map(struct block_map_state_2_0 state, block_count_t logical
28672867 BUILD_BUG_ON (VDO_BLOCK_MAP_ENTRIES_PER_PAGE !=
28682868 ((VDO_BLOCK_SIZE - sizeof (struct block_map_page )) /
28692869 sizeof (struct block_map_entry )));
2870- result = ASSERT (cache_size > 0 , "block map cache size is specified" );
2871- if (result != UDS_SUCCESS )
2870+ result = VDO_ASSERT (cache_size > 0 , "block map cache size is specified" );
2871+ if (result != VDO_SUCCESS )
28722872 return result ;
28732873
28742874 result = vdo_allocate_extended (struct block_map ,
@@ -2937,7 +2937,7 @@ void vdo_initialize_block_map_from_journal(struct block_map *map,
29372937 for (z = 0 ; z < map -> zone_count ; z ++ ) {
29382938 struct dirty_lists * dirty_lists = map -> zones [z ].dirty_lists ;
29392939
2940- ASSERT_LOG_ONLY (dirty_lists -> next_period == 0 , "current period not set" );
2940+ VDO_ASSERT_LOG_ONLY (dirty_lists -> next_period == 0 , "current period not set" );
29412941 dirty_lists -> oldest_period = map -> current_era_point ;
29422942 dirty_lists -> next_period = map -> current_era_point + 1 ;
29432943 dirty_lists -> offset = map -> current_era_point % dirty_lists -> maximum_age ;
@@ -2971,8 +2971,8 @@ static void initiate_drain(struct admin_state *state)
29712971{
29722972 struct block_map_zone * zone = container_of (state , struct block_map_zone , state );
29732973
2974- ASSERT_LOG_ONLY ((zone -> active_lookups == 0 ),
2975- "%s() called with no active lookups" , __func__ );
2974+ VDO_ASSERT_LOG_ONLY ((zone -> active_lookups == 0 ),
2975+ "%s() called with no active lookups" , __func__ );
29762976
29772977 if (!vdo_is_state_suspending (state )) {
29782978 while (zone -> dirty_lists -> oldest_period < zone -> dirty_lists -> next_period )