@@ -406,6 +406,12 @@ struct cache {
 	mempool_t migration_pool;

 	struct bio_set bs;
+
+	/*
+	 * Cache_size entries. Set bits indicate blocks mapped beyond the
+	 * target length, which are marked for invalidation.
+	 */
+	unsigned long *invalid_bitset;
 };

 struct per_bio_data {
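The field above is the whole of the new state: one bit per cache block, set when that block's mapping points past the shrunken origin. It is allocated and cleared with dm-cache's own `alloc_bitset()`/`clear_bitset()` helpers in the `cache_create()` hunk below. As a rough userspace sketch of the sizing only (`model_alloc_bitset()` is a hypothetical stand-in, not dm-cache API):

```c
#include <limits.h>
#include <stdlib.h>

#define BITS_PER_LONG (CHAR_BIT * sizeof(unsigned long))

/* Hypothetical stand-in for alloc_bitset(): one bit per cache block,
 * rounded up to whole longs, zero-filled so no block starts out marked. */
static unsigned long *model_alloc_bitset(unsigned int nr_cblocks)
{
	size_t longs = (nr_cblocks + BITS_PER_LONG - 1) / BITS_PER_LONG;
	return calloc(longs, sizeof(unsigned long));
}

int main(void)
{
	unsigned long *bits = model_alloc_bitset(1024); /* e.g. 1024 cache blocks */
	free(bits);
	return 0;
}
```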
@@ -1922,6 +1928,9 @@ static void __destroy(struct cache *cache)
 	if (cache->discard_bitset)
 		free_bitset(cache->discard_bitset);

+	if (cache->invalid_bitset)
+		free_bitset(cache->invalid_bitset);
+
 	if (cache->copier)
 		dm_kcopyd_client_destroy(cache->copier);

@@ -2510,6 +2519,13 @@ static int cache_create(struct cache_args *ca, struct cache **result)
 	}
 	clear_bitset(cache->discard_bitset, from_dblock(cache->discard_nr_blocks));

+	cache->invalid_bitset = alloc_bitset(from_cblock(cache->cache_size));
+	if (!cache->invalid_bitset) {
+		*error = "could not allocate bitset for invalid blocks";
+		goto bad;
+	}
+	clear_bitset(cache->invalid_bitset, from_cblock(cache->cache_size));
+
 	cache->copier = dm_kcopyd_client_create(&dm_kcopyd_throttle);
 	if (IS_ERR(cache->copier)) {
 		*error = "could not create kcopyd client";
@@ -2808,6 +2824,24 @@ static int load_mapping(void *context, dm_oblock_t oblock, dm_cblock_t cblock,
 	return policy_load_mapping(cache->policy, oblock, cblock, dirty, hint, hint_valid);
 }

+static int load_filtered_mapping(void *context, dm_oblock_t oblock, dm_cblock_t cblock,
+				 bool dirty, uint32_t hint, bool hint_valid)
+{
+	struct cache *cache = context;
+
+	if (from_oblock(oblock) >= from_oblock(cache->origin_blocks)) {
+		if (dirty) {
+			DMERR("%s: unable to shrink origin; cache block %u is dirty",
+			      cache_device_name(cache), from_cblock(cblock));
+			return -EFBIG;
+		}
+		set_bit(from_cblock(cblock), cache->invalid_bitset);
+		return 0;
+	}
+
+	return load_mapping(context, oblock, cblock, dirty, hint, hint_valid);
+}
+
 /*
  * The discard block size in the on disk metadata is not
  * necessarily the same as we're currently using. So we have to
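`load_filtered_mapping()` carries the policy of the patch: a mapping inside the new origin loads as before; a clean mapping past the end is merely recorded in `invalid_bitset` for later removal; a dirty mapping past the end aborts the load with `-EFBIG`, because invalidating it would throw away data that was never written back. A minimal userspace model of just that three-way decision (`filter_mapping()` and the enum are illustrative names, not dm-cache API):

```c
#include <stdbool.h>
#include <stdio.h>

enum action { KEEP, MARK_INVALID, REJECT_SHRINK };

/* Decision table of load_filtered_mapping(); sizes are in blocks. */
static enum action filter_mapping(unsigned long oblock, bool dirty,
				  unsigned long origin_blocks)
{
	if (oblock < origin_blocks)
		return KEEP;                         /* still within the origin */
	return dirty ? REJECT_SHRINK : MARK_INVALID; /* beyond the new end */
}

int main(void)
{
	/* Origin shrunk to 100 blocks: */
	printf("%d\n", filter_mapping(42, true, 100));   /* KEEP */
	printf("%d\n", filter_mapping(120, false, 100)); /* MARK_INVALID */
	printf("%d\n", filter_mapping(120, true, 100));  /* REJECT_SHRINK */
	return 0;
}
```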
@@ -2899,6 +2933,27 @@ static dm_cblock_t get_cache_dev_size(struct cache *cache)
 	return to_cblock(size);
 }

+static bool can_resume(struct cache *cache)
+{
+	/*
+	 * Disallow retrying the resume operation for devices that failed the
+	 * first resume attempt, as the failure leaves the policy object partially
+	 * initialized. Retrying could trigger BUG_ON when loading cache mappings
+	 * into the incomplete policy object.
+	 */
+	if (cache->sized && !cache->loaded_mappings) {
+		if (get_cache_mode(cache) != CM_WRITE)
+			DMERR("%s: unable to resume a failed-loaded cache, please check metadata.",
+			      cache_device_name(cache));
+		else
+			DMERR("%s: unable to resume cache due to missing proper cache table reload",
+			      cache_device_name(cache));
+		return false;
+	}
+
+	return true;
+}
+
 static bool can_resize(struct cache *cache, dm_cblock_t new_size)
 {
 	if (from_cblock(new_size) > from_cblock(cache->cache_size)) {
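For background (my reading of the surrounding driver, not part of this diff): `cache->sized` is set once preresume has validated the cache device size, and `cache->loaded_mappings` only after the mappings load successfully, so `sized && !loaded_mappings` can only mean an earlier preresume failed partway through. A toy model of the gate under that assumption:

```c
#include <assert.h>
#include <stdbool.h>

/* Simplified stand-in for can_resume(); the two flags mirror the real
 * struct cache fields. */
static bool can_resume_model(bool sized, bool loaded_mappings)
{
	/* sized && !loaded_mappings: a previous preresume sized the device
	 * but failed before the mappings loaded, leaving the policy object
	 * half-populated. */
	return !sized || loaded_mappings;
}

int main(void)
{
	assert(can_resume_model(false, false)); /* first resume attempt */
	assert(can_resume_model(true, true));   /* healthy resumed cache */
	assert(!can_resume_model(true, false)); /* failed load: refuse retry */
	return 0;
}
```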
@@ -2941,12 +2996,33 @@ static int resize_cache_dev(struct cache *cache, dm_cblock_t new_size)
 	return 0;
 }

+static int truncate_oblocks(struct cache *cache)
+{
+	uint32_t nr_blocks = from_cblock(cache->cache_size);
+	uint32_t i;
+	int r;
+
+	for_each_set_bit(i, cache->invalid_bitset, nr_blocks) {
+		r = dm_cache_remove_mapping(cache->cmd, to_cblock(i));
+		if (r) {
+			DMERR_LIMIT("%s: invalidation failed; couldn't update on disk metadata",
+				    cache_device_name(cache));
+			return r;
+		}
+	}
+
+	return 0;
+}
+
 static int cache_preresume(struct dm_target *ti)
 {
 	int r = 0;
 	struct cache *cache = ti->private;
 	dm_cblock_t csize = get_cache_dev_size(cache);

+	if (!can_resume(cache))
+		return -EINVAL;
+
 	/*
 	 * Check to see if the cache has resized.
 	 */
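`truncate_oblocks()` visits only the set bits of `invalid_bitset`, via the kernel's `for_each_set_bit()` iterator, and removes each marked mapping from the on-disk metadata, failing fast on the first error. The sketch below re-creates the walk in portable C; `test_bit_model()`, `truncate_model()` and `remove_mapping_stub()` are stand-ins for `test_bit()`, the real function and `dm_cache_remove_mapping()`:

```c
#include <limits.h>
#include <stdbool.h>

#define BITS_PER_LONG (CHAR_BIT * sizeof(unsigned long))

static bool test_bit_model(unsigned int i, const unsigned long *bits)
{
	return (bits[i / BITS_PER_LONG] >> (i % BITS_PER_LONG)) & 1UL;
}

/* Visit every set bit in order, as for_each_set_bit() does, and remove
 * the corresponding mapping; stop at the first failure so the caller can
 * report a metadata error. */
static int truncate_model(const unsigned long *invalid, unsigned int nr_blocks,
			  int (*remove_mapping)(unsigned int cblock))
{
	unsigned int i;
	int r;

	for (i = 0; i < nr_blocks; i++) {
		if (!test_bit_model(i, invalid))
			continue;
		r = remove_mapping(i);
		if (r)
			return r;
	}
	return 0;
}

static int remove_mapping_stub(unsigned int cblock)
{
	return 0; /* pretend the metadata update succeeded */
}

int main(void)
{
	unsigned long invalid[2] = { 0 };
	invalid[0] |= 1UL << 5; /* mark cache block 5 for invalidation */
	return truncate_model(invalid, 2 * BITS_PER_LONG, remove_mapping_stub);
}
```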
@@ -2962,11 +3038,25 @@ static int cache_preresume(struct dm_target *ti)
 	}

 	if (!cache->loaded_mappings) {
+		/*
+		 * The fast device could have been resized since the last
+		 * failed preresume attempt. To be safe we start with a
+		 * blank bitset for cache blocks.
+		 */
+		clear_bitset(cache->invalid_bitset, from_cblock(cache->cache_size));
+
 		r = dm_cache_load_mappings(cache->cmd, cache->policy,
-					   load_mapping, cache);
+					   load_filtered_mapping, cache);
 		if (r) {
 			DMERR("%s: could not load cache mappings", cache_device_name(cache));
-			metadata_operation_failed(cache, "dm_cache_load_mappings", r);
+			if (r != -EFBIG)
+				metadata_operation_failed(cache, "dm_cache_load_mappings", r);
+			return r;
+		}
+
+		r = truncate_oblocks(cache);
+		if (r) {
+			metadata_operation_failed(cache, "dm_cache_remove_mapping", r);
 			return r;
 		}

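Two details of this hunk are easy to miss. The invalid bitset is re-cleared on every load attempt because the fast device may have been resized after a failed preresume, so stale bits must not carry over. And `-EFBIG` deliberately skips `metadata_operation_failed()`: presumably because a dirty block beyond the new origin means the shrink is being refused (a table geometry problem), not that the metadata is corrupt, so the cache is not pushed into its metadata-failure mode; any other load error, and any failure from `truncate_oblocks()`, is still treated as a metadata failure.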
@@ -3426,7 +3516,7 @@ static void cache_io_hints(struct dm_target *ti, struct queue_limits *limits)

 static struct target_type cache_target = {
 	.name = "cache",
-	.version = {2, 2, 0},
+	.version = {2, 3, 0},
 	.module = THIS_MODULE,
 	.ctr = cache_ctr,
 	.dtr = cache_dtr,