@@ -77,7 +77,17 @@ enum _slab_flag_bits {
 #define SLAB_POISON __SLAB_FLAG_BIT(_SLAB_POISON)
 /* Indicate a kmalloc slab */
 #define SLAB_KMALLOC __SLAB_FLAG_BIT(_SLAB_KMALLOC)
-/* Align objs on cache lines */
+/**
+ * define SLAB_HWCACHE_ALIGN - Align objects on cache line boundaries.
+ *
+ * Sufficiently large objects are aligned on a cache line boundary. For objects
+ * smaller than half of the cache line size, the alignment is half of the cache
+ * line size. In general, if the object size is smaller than 1/2^n of the cache
+ * line size, the alignment is adjusted to 1/2^n.
+ *
+ * If explicit alignment is also requested by the respective
+ * &struct kmem_cache_args field, the greater of the two alignments is applied.
+ */
 #define SLAB_HWCACHE_ALIGN __SLAB_FLAG_BIT(_SLAB_HWCACHE_ALIGN)
 /* Use GFP_DMA memory */
 #define SLAB_CACHE_DMA __SLAB_FLAG_BIT(_SLAB_CACHE_DMA)
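
As a quick illustration of how SLAB_HWCACHE_ALIGN combines with an explicit alignment request, here is a minimal sketch, assuming the kmem_cache_create() variant that takes a struct kmem_cache_args (the field the new kerneldoc refers to); the struct foo type, the cache name and the alignment value are invented for illustration and are not part of this patch:

    #include <linux/errno.h>
    #include <linux/init.h>
    #include <linux/slab.h>
    #include <linux/types.h>

    /* Illustrative object type; not part of the patch. */
    struct foo {
        u64 key;
        u64 payload[6];
    };

    static struct kmem_cache *foo_cache;

    static int __init foo_cache_init(void)
    {
        struct kmem_cache_args args = {
            /*
             * Explicit alignment request; with SLAB_HWCACHE_ALIGN also set,
             * the larger of this value and the cache-line derived alignment
             * is what the objects actually get.
             */
            .align = 32,
        };

        foo_cache = kmem_cache_create("foo_cache", sizeof(struct foo), &args,
                                      SLAB_HWCACHE_ALIGN);
        return foo_cache ? 0 : -ENOMEM;
    }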
@@ -87,8 +97,8 @@ enum _slab_flag_bits {
 #define SLAB_STORE_USER __SLAB_FLAG_BIT(_SLAB_STORE_USER)
 /* Panic if kmem_cache_create() fails */
 #define SLAB_PANIC __SLAB_FLAG_BIT(_SLAB_PANIC)
-/*
- * SLAB_TYPESAFE_BY_RCU - **WARNING** READ THIS!
+/**
+ * define SLAB_TYPESAFE_BY_RCU - **WARNING** READ THIS!
  *
  * This delays freeing the SLAB page by a grace period, it does _NOT_
  * delay object freeing. This means that if you do kmem_cache_free()
@@ -99,20 +109,22 @@ enum _slab_flag_bits {
  * stays valid, the trick to using this is relying on an independent
  * object validation pass. Something like:
  *
- * begin:
- *  rcu_read_lock();
- *  obj = lockless_lookup(key);
- *  if (obj) {
- *    if (!try_get_ref(obj)) // might fail for free objects
- *      rcu_read_unlock();
- *      goto begin;
+ * ::
+ *
+ *  begin:
+ *   rcu_read_lock();
+ *   obj = lockless_lookup(key);
+ *   if (obj) {
+ *     if (!try_get_ref(obj)) // might fail for free objects
+ *       rcu_read_unlock();
+ *       goto begin;
  *
- *    if (obj->key != key) { // not the object we expected
- *      put_ref(obj);
- *      rcu_read_unlock();
- *      goto begin;
- *    }
- *  }
+ *     if (obj->key != key) { // not the object we expected
+ *       put_ref(obj);
+ *       rcu_read_unlock();
+ *       goto begin;
+ *     }
+ *   }
  *  rcu_read_unlock();
  *
  * This is useful if we need to approach a kernel structure obliquely,
@@ -137,7 +149,6 @@ enum _slab_flag_bits {
  *
  * Note that SLAB_TYPESAFE_BY_RCU was originally named SLAB_DESTROY_BY_RCU.
  */
-/* Defer freeing slabs to RCU */
 #define SLAB_TYPESAFE_BY_RCU __SLAB_FLAG_BIT(_SLAB_TYPESAFE_BY_RCU)
 /* Trace allocations and frees */
 #define SLAB_TRACE __SLAB_FLAG_BIT(_SLAB_TRACE)
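
To complement the lookup loop quoted in the comment, here is a minimal, hypothetical sketch of the allocation side of a SLAB_TYPESAFE_BY_RCU cache; the conn type, its field names and the cache name are invented and not taken from this patch. The point is that only the backing pages are RCU-deferred, so readers must still revalidate the object after lookup:

    #include <linux/errno.h>
    #include <linux/init.h>
    #include <linux/refcount.h>
    #include <linux/slab.h>
    #include <linux/types.h>

    /* Illustrative lookup object; not part of the patch. */
    struct conn {
        refcount_t ref; /* try_get_ref() above maps to refcount_inc_not_zero() */
        u64        key;
    };

    static struct kmem_cache *conn_cache;

    static int __init conn_cache_init(void)
    {
        /*
         * Only the slab pages are freed after an RCU grace period; an object
         * may be freed and reused while an RCU reader still holds a pointer
         * to it, so lookups must recheck ->key and take a reference with
         * refcount_inc_not_zero(), as in the loop quoted above.
         */
        conn_cache = kmem_cache_create("conn_cache", sizeof(struct conn), 0,
                                       SLAB_TYPESAFE_BY_RCU, NULL);
        return conn_cache ? 0 : -ENOMEM;
    }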
@@ -170,7 +181,12 @@ enum _slab_flag_bits {
 #else
 # define SLAB_FAILSLAB __SLAB_FLAG_UNUSED
 #endif
-/* Account to memcg */
+/**
+ * define SLAB_ACCOUNT - Account allocations to memcg.
+ *
+ * All object allocations from this cache will be memcg accounted, regardless
+ * of whether __GFP_ACCOUNT is passed to individual allocations.
+ */
 #ifdef CONFIG_MEMCG
 # define SLAB_ACCOUNT __SLAB_FLAG_BIT(_SLAB_ACCOUNT)
 #else
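
A small sketch of what the SLAB_ACCOUNT wording means in practice, with an invented cache name and object size: every allocation from the cache is charged to the allocating task's memory cgroup even though the allocation site passes plain GFP_KERNEL without __GFP_ACCOUNT:

    #include <linux/errno.h>
    #include <linux/init.h>
    #include <linux/slab.h>

    static struct kmem_cache *sess_cache; /* illustrative name */

    static int __init sess_cache_demo(void)
    {
        void *obj;

        /* All objects from this cache are memcg-accounted due to the flag. */
        sess_cache = kmem_cache_create("sess_cache", 256, 0,
                                       SLAB_ACCOUNT, NULL);
        if (!sess_cache)
            return -ENOMEM;

        /* No __GFP_ACCOUNT needed here; the cache flag already covers it. */
        obj = kmem_cache_zalloc(sess_cache, GFP_KERNEL);
        if (obj)
            kmem_cache_free(sess_cache, obj);
        return 0;
    }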
@@ -197,7 +213,13 @@ enum _slab_flag_bits {
 #endif
 
 /* The following flags affect the page allocator grouping pages by mobility */
-/* Objects are reclaimable */
+/**
+ * define SLAB_RECLAIM_ACCOUNT - Objects are reclaimable.
+ *
+ * Use this flag for caches that have an associated shrinker. As a result, slab
+ * pages are allocated with __GFP_RECLAIMABLE, which affects grouping pages by
+ * mobility, and are accounted in the SReclaimable counter in /proc/meminfo.
+ */
 #ifndef CONFIG_SLUB_TINY
 #define SLAB_RECLAIM_ACCOUNT __SLAB_FLAG_BIT(_SLAB_RECLAIM_ACCOUNT)
 #else
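
And a matching sketch for SLAB_RECLAIM_ACCOUNT, again with invented names: the flag is meant for caches whose objects an associated shrinker can actually free, which puts the slab pages into the reclaimable migrate type and under SReclaimable in /proc/meminfo:

    #include <linux/errno.h>
    #include <linux/init.h>
    #include <linux/slab.h>

    /* Illustrative cache for shrinker-backed objects; not part of the patch. */
    static struct kmem_cache *icache_like_cache;

    static int __init icache_like_init(void)
    {
        /*
         * These objects can be freed by a shrinker under memory pressure, so
         * the cache is marked reclaimable: its pages are allocated with
         * __GFP_RECLAIMABLE and show up under SReclaimable, not SUnreclaim.
         */
        icache_like_cache = kmem_cache_create("icache_like", 192, 0,
                                              SLAB_RECLAIM_ACCOUNT, NULL);
        return icache_like_cache ? 0 : -ENOMEM;
    }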