@@ -897,6 +897,109 @@ void free_large_kmalloc(struct folio *folio, void *object)
897897 - (PAGE_SIZE << order ));
898898 __free_pages (folio_page (folio , 0 ), order );
899899}
900+
/* Defined later in this file; forward-declared so the fast path can call it. */
static void *__kmalloc_large_node(size_t size, gfp_t flags, int node);

/*
 * __do_kmalloc_node - common implementation behind __kmalloc(),
 * __kmalloc_node() and __kmalloc_node_track_caller().
 *
 * @size:   requested allocation size in bytes
 * @flags:  GFP allocation flags
 * @node:   preferred NUMA node (NUMA_NO_NODE for no preference)
 * @caller: return address recorded by the trace events
 *
 * Requests larger than KMALLOC_MAX_CACHE_SIZE bypass the slab caches and
 * go straight to the page allocator via __kmalloc_large_node(); everything
 * else is served from a size-class kmalloc cache.
 *
 * Returns the allocated object, ZERO_SIZE_PTR for zero-size requests, or
 * NULL on failure.
 */
static __always_inline
void *__do_kmalloc_node(size_t size, gfp_t flags, int node, unsigned long caller)
{
	struct kmem_cache *s;
	void *ret;

	if (unlikely(size > KMALLOC_MAX_CACHE_SIZE)) {
		ret = __kmalloc_large_node(size, flags, node);
		trace_kmalloc_node(caller, ret, NULL,
				   size, PAGE_SIZE << get_order(size),
				   flags, node);
		return ret;
	}

	s = kmalloc_slab(size, flags);

	/* ZERO_OR_NULL_PTR covers both the zero-size and the failure case */
	if (unlikely(ZERO_OR_NULL_PTR(s)))
		return s;

	ret = __kmem_cache_alloc_node(s, flags, node, size, caller);
	/* KASAN annotation must happen before the pointer escapes to the caller */
	ret = kasan_kmalloc(s, ret, size, flags);
	/* s->size is the cache's object size, i.e. the usable allocation size */
	trace_kmalloc_node(caller, ret, s, size,
			   s->size, flags, node);
	return ret;
}
927+
/*
 * __kmalloc_node - kmalloc entry point with an explicit NUMA node.
 *
 * _RET_IP_ is taken here (not inside __do_kmalloc_node, which is
 * __always_inline) so the trace events record the external caller.
 */
void *__kmalloc_node(size_t size, gfp_t flags, int node)
{
	return __do_kmalloc_node(size, flags, node, _RET_IP_);
}
EXPORT_SYMBOL(__kmalloc_node);
933+
/*
 * __kmalloc - node-agnostic kmalloc entry point; same as __kmalloc_node()
 * with NUMA_NO_NODE. _RET_IP_ captures the external caller for tracing.
 */
void *__kmalloc(size_t size, gfp_t flags)
{
	return __do_kmalloc_node(size, flags, NUMA_NO_NODE, _RET_IP_);
}
EXPORT_SYMBOL(__kmalloc);
939+
/*
 * __kmalloc_node_track_caller - like __kmalloc_node(), but the caller
 * supplies the return address explicitly. Used by wrappers (e.g.
 * kmalloc_track_caller()) so trace/debug output attributes the allocation
 * to the wrapper's caller rather than the wrapper itself.
 */
void *__kmalloc_node_track_caller(size_t size, gfp_t flags,
				  int node, unsigned long caller)
{
	return __do_kmalloc_node(size, flags, node, caller);
}
EXPORT_SYMBOL(__kmalloc_node_track_caller);
946+
947+ /**
948+ * kfree - free previously allocated memory
949+ * @object: pointer returned by kmalloc.
950+ *
951+ * If @object is NULL, no operation is performed.
952+ *
953+ * Don't free memory not originally allocated by kmalloc()
954+ * or you will run into trouble.
955+ */
956+ void kfree (const void * object )
957+ {
958+ struct folio * folio ;
959+ struct slab * slab ;
960+ struct kmem_cache * s ;
961+
962+ trace_kfree (_RET_IP_ , object );
963+
964+ if (unlikely (ZERO_OR_NULL_PTR (object )))
965+ return ;
966+
967+ folio = virt_to_folio (object );
968+ if (unlikely (!folio_test_slab (folio ))) {
969+ free_large_kmalloc (folio , (void * )object );
970+ return ;
971+ }
972+
973+ slab = folio_slab (folio );
974+ s = slab -> slab_cache ;
975+ __kmem_cache_free (s , (void * )object , _RET_IP_ );
976+ }
977+ EXPORT_SYMBOL (kfree );
978+
979+ /**
980+ * __ksize -- Uninstrumented ksize.
981+ * @object: pointer to the object
982+ *
983+ * Unlike ksize(), __ksize() is uninstrumented, and does not provide the same
984+ * safety checks as ksize() with KASAN instrumentation enabled.
985+ *
986+ * Return: size of the actual memory used by @object in bytes
987+ */
988+ size_t __ksize (const void * object )
989+ {
990+ struct folio * folio ;
991+
992+ if (unlikely (object == ZERO_SIZE_PTR ))
993+ return 0 ;
994+
995+ folio = virt_to_folio (object );
996+
997+ if (unlikely (!folio_test_slab (folio )))
998+ return folio_size (folio );
999+
1000+ return slab_ksize (folio_slab (folio )-> slab_cache );
1001+ }
1002+ EXPORT_SYMBOL (__ksize );
9001003#endif /* !CONFIG_SLOB */
9011004
9021005gfp_t kmalloc_fix_flags (gfp_t flags )
@@ -917,7 +1020,7 @@ gfp_t kmalloc_fix_flags(gfp_t flags)
9171020 * know the allocation order to free the pages properly in kfree.
9181021 */
9191022
920- void * kmalloc_large_node_notrace (size_t size , gfp_t flags , int node )
1023+ static void * __kmalloc_large_node (size_t size , gfp_t flags , int node )
9211024{
9221025 struct page * page ;
9231026 void * ptr = NULL ;
@@ -943,7 +1046,7 @@ void *kmalloc_large_node_notrace(size_t size, gfp_t flags, int node)
9431046
9441047void * kmalloc_large (size_t size , gfp_t flags )
9451048{
946- void * ret = kmalloc_large_node_notrace (size , flags , NUMA_NO_NODE );
1049+ void * ret = __kmalloc_large_node (size , flags , NUMA_NO_NODE );
9471050
9481051 trace_kmalloc (_RET_IP_ , ret , NULL , size ,
9491052 PAGE_SIZE << get_order (size ), flags );
@@ -953,7 +1056,7 @@ EXPORT_SYMBOL(kmalloc_large);
9531056
9541057void * kmalloc_large_node (size_t size , gfp_t flags , int node )
9551058{
956- void * ret = kmalloc_large_node_notrace (size , flags , node );
1059+ void * ret = __kmalloc_large_node (size , flags , node );
9571060
9581061 trace_kmalloc_node (_RET_IP_ , ret , NULL , size ,
9591062 PAGE_SIZE << get_order (size ), flags , node );
0 commit comments