 #include "kernel-shared/transaction.h"
 #include "kernel-shared/messages.h"
 
+struct kmem_cache *btrfs_delayed_ref_head_cachep;
+struct kmem_cache *btrfs_delayed_tree_ref_cachep;
+struct kmem_cache *btrfs_delayed_data_ref_cachep;
+struct kmem_cache *btrfs_delayed_extent_op_cachep;
 /*
  * delayed back reference update tracking. For subvolume trees
  * we queue up extent allocations and backref maintenance for
@@ -51,6 +55,34 @@ static int comp_tree_refs(struct btrfs_delayed_tree_ref *ref1,
 	return 0;
 }
 
+/*
+ * compare two delayed data backrefs with same bytenr and type
+ */
+static int comp_data_refs(struct btrfs_delayed_data_ref *ref1,
+			  struct btrfs_delayed_data_ref *ref2)
+{
+	if (ref1->node.type == BTRFS_EXTENT_DATA_REF_KEY) {
+		if (ref1->root < ref2->root)
+			return -1;
+		if (ref1->root > ref2->root)
+			return 1;
+		if (ref1->objectid < ref2->objectid)
+			return -1;
+		if (ref1->objectid > ref2->objectid)
+			return 1;
+		if (ref1->offset < ref2->offset)
+			return -1;
+		if (ref1->offset > ref2->offset)
+			return 1;
+	} else {
+		if (ref1->parent < ref2->parent)
+			return -1;
+		if (ref1->parent > ref2->parent)
+			return 1;
+	}
+	return 0;
+}
+
 static int comp_refs(struct btrfs_delayed_ref_node *ref1,
 		     struct btrfs_delayed_ref_node *ref2,
 		     bool check_seq)
@@ -66,8 +98,8 @@ static int comp_refs(struct btrfs_delayed_ref_node *ref1,
 		ret = comp_tree_refs(btrfs_delayed_node_to_tree_ref(ref1),
 				     btrfs_delayed_node_to_tree_ref(ref2));
 	else
-		BUG();
-
+		ret = comp_data_refs(btrfs_delayed_node_to_data_ref(ref1),
+				     btrfs_delayed_node_to_data_ref(ref2));
 	if (ret)
 		return ret;
 	if (check_seq) {
@@ -299,14 +331,28 @@ btrfs_select_ref_head(struct btrfs_trans_handle *trans)
 				  href_node);
 	}
 
-	head->processing = 1;
+	head->processing = true;
 	WARN_ON(delayed_refs->num_heads_ready == 0);
 	delayed_refs->num_heads_ready--;
 	delayed_refs->run_delayed_start = head->bytenr +
 		head->num_bytes;
 	return head;
 }
 
+void btrfs_delete_ref_head(struct btrfs_delayed_ref_root *delayed_refs,
+			   struct btrfs_delayed_ref_head *head)
+{
+	lockdep_assert_held(&delayed_refs->lock);
+	lockdep_assert_held(&head->lock);
+
+	rb_erase(&head->href_node, &delayed_refs->href_root);
+	RB_CLEAR_NODE(&head->href_node);
+	atomic_dec(&delayed_refs->num_entries);
+	delayed_refs->num_heads--;
+	if (!head->processing)
+		delayed_refs->num_heads_ready--;
+}
+
 /*
  * Helper to insert the ref_node to the tail or merge with tail.
 *
@@ -431,7 +477,7 @@ static void init_delayed_ref_head(struct btrfs_delayed_ref_head *head_ref,
 				  bool is_system)
 {
 	int count_mod = 1;
-	int must_insert_reserved = 0;
+	bool must_insert_reserved = false;
 
 	/* If reserved is provided, it must be a data extent. */
 	BUG_ON(!is_data && reserved);
@@ -456,11 +502,11 @@ static void init_delayed_ref_head(struct btrfs_delayed_ref_head *head_ref,
 	 * BTRFS_ADD_DELAYED_REF because other special casing is not required.
 	 */
 	if (action == BTRFS_ADD_DELAYED_EXTENT)
-		must_insert_reserved = 1;
+		must_insert_reserved = true;
 	else
-		must_insert_reserved = 0;
+		must_insert_reserved = false;
 
-	head_ref->refs = 1;
+	refcount_set(&head_ref->refs, 1);
 	head_ref->bytenr = bytenr;
 	head_ref->num_bytes = num_bytes;
 	head_ref->ref_mod = count_mod;
@@ -470,7 +516,7 @@ static void init_delayed_ref_head(struct btrfs_delayed_ref_head *head_ref,
 	head_ref->ref_tree = RB_ROOT;
 	INIT_LIST_HEAD(&head_ref->ref_add_list);
 	RB_CLEAR_NODE(&head_ref->href_node);
-	head_ref->processing = 0;
+	head_ref->processing = false;
 	head_ref->total_ref_mod = count_mod;
 }
 
@@ -546,7 +592,7 @@ static void init_delayed_ref_common(struct btrfs_fs_info *fs_info,
 	if (action == BTRFS_ADD_DELAYED_EXTENT)
 		action = BTRFS_ADD_DELAYED_REF;
 
-	ref->refs = 1;
+	refcount_set(&ref->refs, 1);
 	ref->bytenr = bytenr;
 	ref->num_bytes = num_bytes;
 	ref->ref_mod = 1;
@@ -642,3 +688,47 @@ void btrfs_destroy_delayed_refs(struct btrfs_trans_handle *trans)
 		ASSERT(cleanup_ref_head(trans, fs_info, head) == 0);
 	}
 }
+
+void __cold btrfs_delayed_ref_exit(void)
+{
+	kmem_cache_destroy(btrfs_delayed_ref_head_cachep);
+	kmem_cache_destroy(btrfs_delayed_tree_ref_cachep);
+	kmem_cache_destroy(btrfs_delayed_data_ref_cachep);
+	kmem_cache_destroy(btrfs_delayed_extent_op_cachep);
+}
+
+int __init btrfs_delayed_ref_init(void)
+{
+	btrfs_delayed_ref_head_cachep = kmem_cache_create(
+				"btrfs_delayed_ref_head",
+				sizeof(struct btrfs_delayed_ref_head), 0,
+				SLAB_MEM_SPREAD, NULL);
+	if (!btrfs_delayed_ref_head_cachep)
+		goto fail;
+
+	btrfs_delayed_tree_ref_cachep = kmem_cache_create(
+				"btrfs_delayed_tree_ref",
+				sizeof(struct btrfs_delayed_tree_ref), 0,
+				SLAB_MEM_SPREAD, NULL);
+	if (!btrfs_delayed_tree_ref_cachep)
+		goto fail;
+
+	btrfs_delayed_data_ref_cachep = kmem_cache_create(
+				"btrfs_delayed_data_ref",
+				sizeof(struct btrfs_delayed_data_ref), 0,
+				SLAB_MEM_SPREAD, NULL);
+	if (!btrfs_delayed_data_ref_cachep)
+		goto fail;
+
+	btrfs_delayed_extent_op_cachep = kmem_cache_create(
+				"btrfs_delayed_extent_op",
+				sizeof(struct btrfs_delayed_extent_op), 0,
+				SLAB_MEM_SPREAD, NULL);
+	if (!btrfs_delayed_extent_op_cachep)
+		goto fail;
+
+	return 0;
+fail:
+	btrfs_delayed_ref_exit();
+	return -ENOMEM;
+}
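
Usage note (not part of the diff): the new init/exit pair is intended to be called once around everything that allocates delayed refs. Below is a minimal sketch with a hypothetical caller named setup_delayed_refs(); it leans on the fact that kmem_cache_destroy() is a no-op for a NULL cache, which is why btrfs_delayed_ref_init() can simply "goto fail" and let btrfs_delayed_ref_exit() tear down whatever was actually created.

	/* Hypothetical caller, shown only to illustrate the init/exit contract. */
	static int setup_delayed_refs(void)
	{
		int ret;

		ret = btrfs_delayed_ref_init();	/* creates the four slab caches */
		if (ret)
			return ret;		/* -ENOMEM; nothing left to undo */

		/* ... queue and run delayed refs here ... */

		btrfs_delayed_ref_exit();	/* destroys the caches; NULL-safe */
		return 0;
	}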