@@ -1344,44 +1344,60 @@ check_bytes_and_report(struct kmem_cache *s, struct slab *slab,
13441344}
13451345
13461346/*
1347- * Object layout:
1347+ * Object field layout:
13481348 *
1349- * object address
1350- * Bytes of the object to be managed.
1351- * If the freepointer may overlay the object then the free
1352- * pointer is at the middle of the object.
1349+ * [Left redzone padding] (if SLAB_RED_ZONE)
1350+ * - Field size: s->red_left_pad
1351+ * - Immediately precedes each object when SLAB_RED_ZONE is set.
L1352+ *	- Filled with 0xbb (SLUB_RED_INACTIVE) for inactive objects and
L1353+ *	  0xcc (SLUB_RED_ACTIVE) for objects in use when SLAB_RED_ZONE is set.
13531354 *
1354- * Poisoning uses 0x6b (POISON_FREE) and the last byte is
1355- * 0xa5 (POISON_END)
1355+ * [Object bytes] (object address starts here)
1356+ * - Field size: s->object_size
1357+ * - Object payload bytes.
1358+ * - If the freepointer may overlap the object, it is stored inside
1359+ * the object (typically near the middle).
1360+ * - Poisoning uses 0x6b (POISON_FREE) and the last byte is
1361+ * 0xa5 (POISON_END) when __OBJECT_POISON is enabled.
13561362 *
1357- * object + s->object_size
1358- * Padding to reach word boundary. This is also used for Redzoning.
1359- * Padding is extended by another word if Redzoning is enabled and
1360- * object_size == inuse.
1363+ * [Word-align padding] (right redzone when SLAB_RED_ZONE is set)
1364+ * - Field size: s->inuse - s->object_size
1365+ * - If redzoning is enabled and ALIGN(size, sizeof(void *)) adds no
1366+ * padding, explicitly extend by one word so the right redzone is
1367+ * non-empty.
L1368+ *	- Filled with 0xbb (SLUB_RED_INACTIVE) for inactive objects and
L1369+ *	  0xcc (SLUB_RED_ACTIVE) for objects in use when SLAB_RED_ZONE is set.
13611370 *
1362- * We fill with 0xbb (SLUB_RED_INACTIVE) for inactive objects and with
1363- * 0xcc (SLUB_RED_ACTIVE) for objects in use.
1371+ * [Metadata starts at object + s->inuse]
1372+ * - A. freelist pointer (if freeptr_outside_object)
1373+ * - B. alloc tracking (SLAB_STORE_USER)
1374+ * - C. free tracking (SLAB_STORE_USER)
1375+ * - D. original request size (SLAB_KMALLOC && SLAB_STORE_USER)
1376+ * - E. KASAN metadata (if enabled)
13641377 *
1365- * object + s->inuse
1366- * Meta data starts here.
1378+ * [Mandatory padding] (if CONFIG_SLUB_DEBUG && SLAB_RED_ZONE)
1379+ * - One mandatory debug word to guarantee a minimum poisoned gap
1380+ * between metadata and the next object, independent of alignment.
1381+ * - Filled with 0x5a (POISON_INUSE) when SLAB_POISON is set.
1382+ * [Final alignment padding]
1383+ * - Any bytes added by ALIGN(size, s->align) to reach s->size.
1384+ * - Filled with 0x5a (POISON_INUSE) when SLAB_POISON is set.
13671385 *
1368- * A. Free pointer (if we cannot overwrite object on free)
1369- * B. Tracking data for SLAB_STORE_USER
1370- * C. Original request size for kmalloc object (SLAB_STORE_USER enabled)
1371- * D. Padding to reach required alignment boundary or at minimum
1372- * one word if debugging is on to be able to detect writes
1373- * before the word boundary.
1386+ * Notes:
1387+ * - Redzones are filled by init_object() with SLUB_RED_ACTIVE/INACTIVE.
1388+ * - Object contents are poisoned with POISON_FREE/END when __OBJECT_POISON.
1389+ * - The trailing padding is pre-filled with POISON_INUSE by
1390+ * setup_slab_debug() when SLAB_POISON is set, and is validated by
1391+ * check_pad_bytes().
1392+ * - The first object pointer is slab_address(slab) +
1393+ * (s->red_left_pad if redzoning); subsequent objects are reached by
1394+ * adding s->size each time.
13741395 *
1375- * Padding is done using 0x5a (POISON_INUSE)
1376- *
1377- * object + s->size
1378- * Nothing is used beyond s->size.
1379- *
1380- * If slabcaches are merged then the object_size and inuse boundaries are mostly
1381- * ignored. And therefore no slab options that rely on these boundaries
1382- * may be used with merged slabcaches.
1396+ * If a slab cache flag relies on specific metadata to exist at a fixed
1397+ * offset, the flag must be included in SLAB_NEVER_MERGE to prevent merging.
1398+ * Otherwise, the cache would misbehave as s->object_size and s->inuse are
1399+ * adjusted during cache merging (see __kmem_cache_alias()).
13831400 */
1384-
13851401static int check_pad_bytes (struct kmem_cache * s , struct slab * slab , u8 * p )
13861402{
13871403 unsigned long off = get_info_end (s ); /* The end of info */
@@ -7967,9 +7983,9 @@ static int calculate_sizes(struct kmem_cache_args *args, struct kmem_cache *s)
79677983
79687984
79697985 /*
7970- * If we are Redzoning then check if there is some space between the
7971- * end of the object and the free pointer. If not then add an
7972- * additional word to have some bytes to store Redzone information .
7986+ * If we are Redzoning and there is no space between the end of the
7987+ * object and the following fields, add one word so the right Redzone
L7988+	 * is non-empty.
79737989 */
79747990 if ((flags & SLAB_RED_ZONE ) && size == s -> object_size )
79757991 size += sizeof (void * );
0 commit comments