Skip to content

Commit ed6f45f

Browse files
prati0100 authored and akpm00 committed
mm: shmem: export some functions to internal.h
shmem_inode_acct_blocks(), shmem_recalc_inode(), and shmem_add_to_page_cache() are used by shmem_alloc_and_add_folio(). This functionality will be used by memfd LUO integration. Link: https://lkml.kernel.org/r/20251125165850.3389713-13-pasha.tatashin@soleen.com Signed-off-by: Pratyush Yadav <ptyadav@amazon.de> Signed-off-by: Pasha Tatashin <pasha.tatashin@soleen.com> Reviewed-by: Mike Rapoport (Microsoft) <rppt@kernel.org> Tested-by: David Matlack <dmatlack@google.com> Cc: Aleksander Lobakin <aleksander.lobakin@intel.com> Cc: Alexander Graf <graf@amazon.com> Cc: Alice Ryhl <aliceryhl@google.com> Cc: Andriy Shevchenko <andriy.shevchenko@linux.intel.com> Cc: anish kumar <yesanishhere@gmail.com> Cc: Anna Schumaker <anna.schumaker@oracle.com> Cc: Bartosz Golaszewski <bartosz.golaszewski@linaro.org> Cc: Bjorn Helgaas <bhelgaas@google.com> Cc: Borislav Betkov <bp@alien8.de> Cc: Chanwoo Choi <cw00.choi@samsung.com> Cc: Chen Ridong <chenridong@huawei.com> Cc: Chris Li <chrisl@kernel.org> Cc: Christian Brauner <brauner@kernel.org> Cc: Daniel Wagner <wagi@kernel.org> Cc: Danilo Krummrich <dakr@kernel.org> Cc: Dan Williams <dan.j.williams@intel.com> Cc: David Hildenbrand <david@redhat.com> Cc: David Jeffery <djeffery@redhat.com> Cc: David Rientjes <rientjes@google.com> Cc: Greg Kroah-Hartman <gregkh@linuxfoundation.org> Cc: Guixin Liu <kanie@linux.alibaba.com> Cc: "H. 
Peter Anvin" <hpa@zytor.com> Cc: Hugh Dickins <hughd@google.com> Cc: Ilpo Järvinen <ilpo.jarvinen@linux.intel.com> Cc: Ingo Molnar <mingo@redhat.com> Cc: Ira Weiny <ira.weiny@intel.com> Cc: Jann Horn <jannh@google.com> Cc: Jason Gunthorpe <jgg@nvidia.com> Cc: Jens Axboe <axboe@kernel.dk> Cc: Joanthan Cameron <Jonathan.Cameron@huawei.com> Cc: Joel Granados <joel.granados@kernel.org> Cc: Johannes Weiner <hannes@cmpxchg.org> Cc: Jonathan Corbet <corbet@lwn.net> Cc: Lennart Poettering <lennart@poettering.net> Cc: Leon Romanovsky <leon@kernel.org> Cc: Leon Romanovsky <leonro@nvidia.com> Cc: Lukas Wunner <lukas@wunner.de> Cc: Marc Rutland <mark.rutland@arm.com> Cc: Masahiro Yamada <masahiroy@kernel.org> Cc: Matthew Maurer <mmaurer@google.com> Cc: Miguel Ojeda <ojeda@kernel.org> Cc: Myugnjoo Ham <myungjoo.ham@samsung.com> Cc: Parav Pandit <parav@nvidia.com> Cc: Pratyush Yadav <pratyush@kernel.org> Cc: Randy Dunlap <rdunlap@infradead.org> Cc: Roman Gushchin <roman.gushchin@linux.dev> Cc: Saeed Mahameed <saeedm@nvidia.com> Cc: Samiullah Khawaja <skhawaja@google.com> Cc: Song Liu <song@kernel.org> Cc: Steven Rostedt <rostedt@goodmis.org> Cc: Stuart Hayes <stuart.w.hayes@gmail.com> Cc: Tejun Heo <tj@kernel.org> Cc: Thomas Gleinxer <tglx@linutronix.de> Cc: Thomas Weißschuh <linux@weissschuh.net> Cc: Vincent Guittot <vincent.guittot@linaro.org> Cc: William Tu <witu@nvidia.com> Cc: Yoann Congal <yoann.congal@smile.fr> Cc: Zhu Yanjun <yanjun.zhu@linux.dev> Cc: Zijun Hu <quic_zijuhu@quicinc.com> Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
1 parent e165e2a commit ed6f45f

2 files changed

Lines changed: 11 additions & 5 deletions

File tree

mm/internal.h

Lines changed: 6 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -1562,6 +1562,12 @@ void __meminit __init_page_from_nid(unsigned long pfn, int nid);
15621562
unsigned long shrink_slab(gfp_t gfp_mask, int nid, struct mem_cgroup *memcg,
15631563
int priority);
15641564

1565+
int shmem_add_to_page_cache(struct folio *folio,
1566+
struct address_space *mapping,
1567+
pgoff_t index, void *expected, gfp_t gfp);
1568+
int shmem_inode_acct_blocks(struct inode *inode, long pages);
1569+
bool shmem_recalc_inode(struct inode *inode, long alloced, long swapped);
1570+
15651571
#ifdef CONFIG_SHRINKER_DEBUG
15661572
static inline __printf(2, 0) int shrinker_debugfs_name_alloc(
15671573
struct shrinker *shrinker, const char *fmt, va_list ap)

mm/shmem.c

Lines changed: 5 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -219,7 +219,7 @@ static inline void shmem_unacct_blocks(unsigned long flags, long pages)
219219
vm_unacct_memory(pages * VM_ACCT(PAGE_SIZE));
220220
}
221221

222-
static int shmem_inode_acct_blocks(struct inode *inode, long pages)
222+
int shmem_inode_acct_blocks(struct inode *inode, long pages)
223223
{
224224
struct shmem_inode_info *info = SHMEM_I(inode);
225225
struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
@@ -435,7 +435,7 @@ static void shmem_free_inode(struct super_block *sb, size_t freed_ispace)
435435
*
436436
* Return: true if swapped was incremented from 0, for shmem_writeout().
437437
*/
438-
static bool shmem_recalc_inode(struct inode *inode, long alloced, long swapped)
438+
bool shmem_recalc_inode(struct inode *inode, long alloced, long swapped)
439439
{
440440
struct shmem_inode_info *info = SHMEM_I(inode);
441441
bool first_swapped = false;
@@ -861,9 +861,9 @@ static void shmem_update_stats(struct folio *folio, int nr_pages)
861861
/*
862862
* Somewhat like filemap_add_folio, but error if expected item has gone.
863863
*/
864-
static int shmem_add_to_page_cache(struct folio *folio,
865-
struct address_space *mapping,
866-
pgoff_t index, void *expected, gfp_t gfp)
864+
int shmem_add_to_page_cache(struct folio *folio,
865+
struct address_space *mapping,
866+
pgoff_t index, void *expected, gfp_t gfp)
867867
{
868868
XA_STATE_ORDER(xas, &mapping->i_pages, index, folio_order(folio));
869869
unsigned long nr = folio_nr_pages(folio);

0 commit comments

Comments
 (0)