
Commit a8c861f

Christoph Hellwig authored and cmaiolino committed
xfs: avoid busy loops in GCD
When GCD has no new work to handle, but read, write or reset commands
are outstanding, it currently busy loops, which is a bit suboptimal,
and can lead to softlockup warnings in case of stuck commands.

Change the code so that the task state is only set to running when work
is performed, which looks a bit tricky due to the design of the
reading/writing/resetting lists that contain both in-flight and
finished commands.

Fixes: 080d01c ("xfs: implement zoned garbage collection")
Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Hans Holmberg <hans.holmberg@wdc.com>
Signed-off-by: Carlos Maiolino <cem@kernel.org>
1 parent f5caeb3 commit a8c861f
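
For context on the fix: the patch moves the GC daemon to the standard kthread pattern of declaring the intent to sleep before checking for work, and reverting to TASK_RUNNING only when there is actually something to do. Below is a minimal, self-contained sketch of that pattern, not code from this patch; gc_has_work() and gc_do_work() are hypothetical stand-ins for the XFS-specific checks:

#include <linux/kthread.h>
#include <linux/sched.h>

/* Hypothetical helpers standing in for the XFS-specific logic. */
static bool gc_has_work(void *arg);
static void gc_do_work(void *arg);

static int gc_thread_fn(void *arg)
{
	for (;;) {
		/* Declare the intent to sleep before checking for work. */
		set_current_state(TASK_INTERRUPTIBLE | TASK_FREEZABLE);

		if (kthread_should_stop()) {
			__set_current_state(TASK_RUNNING);
			break;
		}

		if (gc_has_work(arg)) {
			/* Go back to running only when work was found. */
			__set_current_state(TASK_RUNNING);
			gc_do_work(arg);
			continue;
		}

		/*
		 * No work: sleep.  If a waker already flipped the state
		 * back to TASK_RUNNING via wake_up_process(), schedule()
		 * returns immediately and the loop re-checks, so no
		 * wakeup is lost.
		 */
		schedule();
	}
	return 0;
}

The key property is that the thread either performs work in the running state or sleeps; there is no path that spins without doing either, which is the busy loop this patch removes.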

File tree: 1 file changed, +46 -35 lines


fs/xfs/xfs_zone_gc.c (46 additions, 35 deletions)

@@ -491,21 +491,6 @@ xfs_zone_gc_select_victim(
 	struct xfs_rtgroup	*victim_rtg = NULL;
 	unsigned int		bucket;
 
-	if (xfs_is_shutdown(mp))
-		return false;
-
-	if (iter->victim_rtg)
-		return true;
-
-	/*
-	 * Don't start new work if we are asked to stop or park.
-	 */
-	if (kthread_should_stop() || kthread_should_park())
-		return false;
-
-	if (!xfs_zoned_need_gc(mp))
-		return false;
-
 	spin_lock(&zi->zi_used_buckets_lock);
 	for (bucket = 0; bucket < XFS_ZONE_USED_BUCKETS; bucket++) {
 		victim_rtg = xfs_zone_gc_pick_victim_from(mp, bucket);
@@ -975,14 +960,35 @@ xfs_zone_gc_reset_zones(
 	} while (next);
 }
 
+static bool
+xfs_zone_gc_should_start_new_work(
+	struct xfs_zone_gc_data	*data)
+{
+	if (xfs_is_shutdown(data->mp))
+		return false;
+	if (!xfs_zone_gc_space_available(data))
+		return false;
+
+	if (!data->iter.victim_rtg) {
+		if (kthread_should_stop() || kthread_should_park())
+			return false;
+		if (!xfs_zoned_need_gc(data->mp))
+			return false;
+		if (!xfs_zone_gc_select_victim(data))
+			return false;
+	}
+
+	return true;
+}
+
 /*
  * Handle the work to read and write data for GC and to reset the zones,
  * including handling all completions.
  *
  * Note that the order of the chunks is preserved so that we don't undo the
  * optimal order established by xfs_zone_gc_query().
  */
-static bool
+static void
 xfs_zone_gc_handle_work(
 	struct xfs_zone_gc_data	*data)
 {
@@ -996,46 +1002,41 @@ xfs_zone_gc_handle_work(
 	zi->zi_reset_list = NULL;
 	spin_unlock(&zi->zi_reset_list_lock);
 
-	if (!xfs_zone_gc_select_victim(data) ||
-	    !xfs_zone_gc_space_available(data)) {
-		if (list_empty(&data->reading) &&
-		    list_empty(&data->writing) &&
-		    list_empty(&data->resetting) &&
-		    !reset_list)
-			return false;
-	}
-
-	__set_current_state(TASK_RUNNING);
-	try_to_freeze();
-
-	if (reset_list)
+	if (reset_list) {
+		set_current_state(TASK_RUNNING);
 		xfs_zone_gc_reset_zones(data, reset_list);
+	}
 
 	list_for_each_entry_safe(chunk, next, &data->resetting, entry) {
 		if (READ_ONCE(chunk->state) != XFS_GC_BIO_DONE)
 			break;
+		set_current_state(TASK_RUNNING);
 		xfs_zone_gc_finish_reset(chunk);
 	}
 
 	list_for_each_entry_safe(chunk, next, &data->writing, entry) {
 		if (READ_ONCE(chunk->state) != XFS_GC_BIO_DONE)
 			break;
+		set_current_state(TASK_RUNNING);
 		xfs_zone_gc_finish_chunk(chunk);
 	}
 
 	blk_start_plug(&plug);
 	list_for_each_entry_safe(chunk, next, &data->reading, entry) {
 		if (READ_ONCE(chunk->state) != XFS_GC_BIO_DONE)
 			break;
+		set_current_state(TASK_RUNNING);
 		xfs_zone_gc_write_chunk(chunk);
 	}
 	blk_finish_plug(&plug);
 
-	blk_start_plug(&plug);
-	while (xfs_zone_gc_start_chunk(data))
-		;
-	blk_finish_plug(&plug);
-	return true;
+	if (xfs_zone_gc_should_start_new_work(data)) {
+		set_current_state(TASK_RUNNING);
+		blk_start_plug(&plug);
+		while (xfs_zone_gc_start_chunk(data))
+			;
+		blk_finish_plug(&plug);
+	}
 }
 
 /*
@@ -1059,8 +1060,18 @@ xfs_zoned_gcd(
 	for (;;) {
 		set_current_state(TASK_INTERRUPTIBLE | TASK_FREEZABLE);
 		xfs_set_zonegc_running(mp);
-		if (xfs_zone_gc_handle_work(data))
+
+		xfs_zone_gc_handle_work(data);
+
+		/*
+		 * Only sleep if nothing set the state to running. Else check for
+		 * work again as someone might have queued up more work and woken
+		 * us in the meantime.
+		 */
+		if (get_current_state() == TASK_RUNNING) {
+			try_to_freeze();
 			continue;
+		}
 
 		if (list_empty(&data->reading) &&
 		    list_empty(&data->writing) &&

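A note on why the get_current_state() == TASK_RUNNING check in the new main loop is race-free: it relies on wakers publishing their work before calling wake_up_process(), whose side effect of setting the task back to TASK_RUNNING is exactly what the check observes. A sketch of such a waker under those assumptions; struct gc_data, struct gc_work, and gc_queue_work() are illustrative names, not the XFS ones:

#include <linux/llist.h>
#include <linux/sched.h>

struct gc_data {
	struct task_struct	*task;		/* the GC kthread */
	struct llist_head	work_list;	/* lock-free work queue */
};

struct gc_work {
	struct llist_node	node;
};

static void gc_queue_work(struct gc_data *data, struct gc_work *work)
{
	/* Publish the work item before waking the daemon. */
	llist_add(&work->node, &data->work_list);

	/*
	 * If the daemon sits between set_current_state(TASK_INTERRUPTIBLE)
	 * and schedule(), this moves it back to TASK_RUNNING, so its
	 * schedule() returns immediately and the new work is picked up
	 * on the next pass through the loop.
	 */
	wake_up_process(data->task);
}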