diff --git a/Documentation/admin-guide/cgroup-v1/memcg_test.rst b/Documentation/admin-guide/cgroup-v1/memcg_test.rst
index 45b94f7b3bebb7..a402359abb996b 100644
--- a/Documentation/admin-guide/cgroup-v1/memcg_test.rst
+++ b/Documentation/admin-guide/cgroup-v1/memcg_test.rst
@@ -97,7 +97,7 @@ Under below explanation, we assume CONFIG_MEM_RES_CTRL_SWAP=y.
 =============
 
 	Page Cache is charged at
-	- add_to_page_cache_locked().
+	- filemap_add_folio().
 
 	The logic is very clear. (About migration, see below)
 
diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h
index 44b9c265a23451..dee519ef7436e2 100644
--- a/include/linux/pagemap.h
+++ b/include/linux/pagemap.h
@@ -1098,8 +1098,6 @@ size_t fault_in_subpage_writeable(char __user *uaddr, size_t size);
 size_t fault_in_safe_writeable(const char __user *uaddr, size_t size);
 size_t fault_in_readable(const char __user *uaddr, size_t size);
 
-int add_to_page_cache_locked(struct page *page, struct address_space *mapping,
-		pgoff_t index, gfp_t gfp);
 int add_to_page_cache_lru(struct page *page, struct address_space *mapping,
 		pgoff_t index, gfp_t gfp);
 int filemap_add_folio(struct address_space *mapping, struct folio *folio,
@@ -1115,22 +1113,6 @@ bool filemap_release_folio(struct folio *folio, gfp_t gfp);
 loff_t mapping_seek_hole_data(struct address_space *, loff_t start, loff_t end,
 		int whence);
 
-/*
- * Like add_to_page_cache_locked, but used to add newly allocated pages:
- * the page is new, so we can just run __SetPageLocked() against it.
- */
-static inline int add_to_page_cache(struct page *page,
-		struct address_space *mapping, pgoff_t offset, gfp_t gfp_mask)
-{
-	int error;
-
-	__SetPageLocked(page);
-	error = add_to_page_cache_locked(page, mapping, offset, gfp_mask);
-	if (unlikely(error))
-		__ClearPageLocked(page);
-	return error;
-}
-
 /* Must be non-static for BPF error injection */
 int __filemap_add_folio(struct address_space *mapping, struct folio *folio,
 		pgoff_t index, gfp_t gfp, void **shadowp);
diff --git a/mm/filemap.c b/mm/filemap.c
index ffdfbc8b0e3cab..22a17ab256f767 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -929,26 +929,6 @@ noinline int __filemap_add_folio(struct address_space *mapping,
 }
 ALLOW_ERROR_INJECTION(__filemap_add_folio, ERRNO);
 
-/**
- * add_to_page_cache_locked - add a locked page to the pagecache
- * @page:	page to add
- * @mapping:	the page's address_space
- * @offset:	page index
- * @gfp_mask:	page allocation mode
- *
- * This function is used to add a page to the pagecache. It must be locked.
- * This function does not add the page to the LRU. The caller must do that.
- *
- * Return: %0 on success, negative error code otherwise.
- */
-int add_to_page_cache_locked(struct page *page, struct address_space *mapping,
-		pgoff_t offset, gfp_t gfp_mask)
-{
-	return __filemap_add_folio(mapping, page_folio(page), offset,
-			gfp_mask, NULL);
-}
-EXPORT_SYMBOL(add_to_page_cache_locked);
-
 int filemap_add_folio(struct address_space *mapping, struct folio *folio,
 				pgoff_t index, gfp_t gfp)
 {
diff --git a/mm/shmem.c b/mm/shmem.c
index b42dfb01c634b8..6a5e46f1a326e7 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -693,7 +693,7 @@ static unsigned long shmem_unused_huge_shrink(struct shmem_sb_info *sbinfo,
 #endif /* CONFIG_TRANSPARENT_HUGEPAGE */
 
 /*
- * Like add_to_page_cache_locked, but error if expected item has gone.
+ * Like filemap_add_folio, but error if expected item has gone.
  */
 static int shmem_add_to_page_cache(struct folio *folio,
 				   struct address_space *mapping,
diff --git a/mm/swap_state.c b/mm/swap_state.c
index 778d57d2d92daa..f5b6f563890805 100644
--- a/mm/swap_state.c
+++ b/mm/swap_state.c
@@ -95,7 +95,7 @@ void *get_shadow_from_swap_cache(swp_entry_t entry)
 }
 
 /*
- * add_to_swap_cache resembles add_to_page_cache_locked on swapper_space,
+ * add_to_swap_cache resembles filemap_add_folio on swapper_space,
  * but sets SwapCache flag and private instead of mapping and index.
  */
 int add_to_swap_cache(struct page *page, swp_entry_t entry,
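
Note (not part of the patch): a remaining out-of-tree caller of the removed helpers can be converted to the folio API directly. A minimal sketch, assuming a freshly allocated page not yet visible to anyone else; the wrapper name my_readahead_add_page() is invented for illustration:

/*
 * Illustrative only: roughly the equivalent of the removed
 * add_to_page_cache().  filemap_add_folio() locks the new folio itself
 * and unlocks it again on failure, so the explicit __SetPageLocked()/
 * __ClearPageLocked() dance from the old inline helper is not needed.
 * Unlike add_to_page_cache(), filemap_add_folio() also places the folio
 * on the LRU; a caller that must stay off the LRU can instead call
 * __filemap_add_folio() on an already-locked folio, which is what
 * add_to_page_cache_locked() used to do.
 */
static int my_readahead_add_page(struct page *page,
		struct address_space *mapping, pgoff_t index, gfp_t gfp)
{
	return filemap_add_folio(mapping, page_folio(page), index, gfp);
}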