mm: add an 'end' parameter to find_get_entries
This simplifies the callers and leads to a more efficient implementation, since the XArray has this functionality already.

Link: https://lkml.kernel.org/r/20201112212641.27837-11-willy@infradead.org
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Reviewed-by: Jan Kara <jack@suse.cz>
Reviewed-by: William Kucharski <william.kucharski@oracle.com>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Cc: Dave Chinner <dchinner@redhat.com>
Cc: Hugh Dickins <hughd@google.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Cc: Yang Shi <yang.shi@linux.alibaba.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
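[Editor's note] The gain is easiest to see at a call site. Below is a minimal userspace sketch, not kernel code: lookup_entries(), the array-backed slots, and BATCH are invented stand-ins for find_get_entries(), the page cache, and PAGEVEC_SIZE. It contrasts the old pattern, where each caller clamped the batch size with min(end - index, PAGEVEC_SIZE) and re-checked the bound on every returned entry, with the new one, where the lookup itself stops at an inclusive end:

	#include <stdio.h>

	#define BATCH 4	/* stand-in for PAGEVEC_SIZE */

	/*
	 * Toy stand-in for find_get_entries(): scan slots [start, end]
	 * (end is inclusive, as in the new kernel API) and report up to
	 * nr occupied slots through indices[].
	 */
	static unsigned lookup_entries(const int *slots, unsigned long nslots,
				       unsigned long start, unsigned long end,
				       unsigned nr, unsigned long *indices)
	{
		unsigned found = 0;
		unsigned long i;

		for (i = start; i <= end && i < nslots && found < nr; i++)
			if (slots[i])
				indices[found++] = i;
		return found;
	}

	int main(void)
	{
		int slots[16] = { [1] = 1, [3] = 1, [5] = 1, [9] = 1, [12] = 1 };
		unsigned long indices[BATCH];
		unsigned long index = 0, end = 10;	/* exclusive, shmem-style */
		unsigned nr;

		/*
		 * New pattern: pass end - 1 (inclusive) and a fixed batch size.
		 * The old pattern passed min(end - index, BATCH) instead, and
		 * then re-checked "index >= end" against every returned entry.
		 */
		while ((nr = lookup_entries(slots, 16, index, end - 1,
					    BATCH, indices))) {
			for (unsigned j = 0; j < nr; j++)
				printf("entry at index %lu\n", indices[j]);
			index = indices[nr - 1] + 1;
		}
		return 0;	/* prints entries 1, 3, 5, 9; 12 is beyond end */
	}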
parent 5c211ba29d
commit ca122fe40e
4 changed files with 10 additions and 15 deletions
--- a/include/linux/pagemap.h
+++ b/include/linux/pagemap.h
@@ -451,7 +451,7 @@ static inline struct page *find_subpage(struct page *head, pgoff_t index)
 }
 
 unsigned find_get_entries(struct address_space *mapping, pgoff_t start,
-		unsigned int nr_entries, struct page **entries,
+		pgoff_t end, unsigned int nr_entries, struct page **entries,
 		pgoff_t *indices);
 unsigned find_get_pages_range(struct address_space *mapping, pgoff_t *start,
 			pgoff_t end, unsigned int nr_pages,
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -1865,6 +1865,7 @@ reset:
  * find_get_entries - gang pagecache lookup
  * @mapping:	The address_space to search
  * @start:	The starting page cache index
+ * @end:	The final page index (inclusive).
  * @nr_entries:	The maximum number of entries
  * @entries:	Where the resulting entries are placed
  * @indices:	The cache indices corresponding to the entries in @entries
@@ -1888,9 +1889,9 @@ reset:
  *
  * Return: the number of pages and shadow entries which were found.
  */
-unsigned find_get_entries(struct address_space *mapping,
-		pgoff_t start, unsigned int nr_entries,
-		struct page **entries, pgoff_t *indices)
+unsigned find_get_entries(struct address_space *mapping, pgoff_t start,
+		pgoff_t end, unsigned int nr_entries, struct page **entries,
+		pgoff_t *indices)
 {
 	XA_STATE(xas, &mapping->i_pages, start);
 	struct page *page;
@@ -1900,7 +1901,7 @@ unsigned find_get_entries(struct address_space *mapping,
 		return 0;
 
 	rcu_read_lock();
-	while ((page = find_get_entry(&xas, ULONG_MAX, XA_PRESENT))) {
+	while ((page = find_get_entry(&xas, end, XA_PRESENT))) {
 		/*
 		 * Terminate early on finding a THP, to allow the caller to
 		 * handle it all at once; but continue if this is hugetlbfs.
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -913,8 +913,6 @@ static void shmem_undo_range(struct inode *inode, loff_t lstart, loff_t lend,
 			struct page *page = pvec.pages[i];
 
 			index = indices[i];
-			if (index >= end)
-				break;
 
 			if (xa_is_value(page)) {
 				if (unfalloc)
@@ -967,9 +965,8 @@ static void shmem_undo_range(struct inode *inode, loff_t lstart, loff_t lend,
 	while (index < end) {
 		cond_resched();
 
-		pvec.nr = find_get_entries(mapping, index,
-				min(end - index, (pgoff_t)PAGEVEC_SIZE),
-				pvec.pages, indices);
+		pvec.nr = find_get_entries(mapping, index, end - 1,
+				PAGEVEC_SIZE, pvec.pages, indices);
 		if (!pvec.nr) {
 			/* If all gone or hole-punch or unfalloc, we're done */
 			if (index == start || end != -1)
@@ -982,9 +979,6 @@ static void shmem_undo_range(struct inode *inode, loff_t lstart, loff_t lend,
 			struct page *page = pvec.pages[i];
 
 			index = indices[i];
-			if (index >= end)
-				break;
-
 			if (xa_is_value(page)) {
 				if (unfalloc)
 					continue;
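[Editor's note] One subtlety in the shmem hunks above: shmem_undo_range() tracks end as an exclusive bound (the first index it must not touch), while the new find_get_entries() parameter is inclusive, so the call site passes end - 1. With the lookup bounded inside the XArray walk, the per-entry "if (index >= end) break;" checks removed above become unnecessary. A minimal restatement of the conversion (the names and values are invented for illustration):

	typedef unsigned long pgoff_t;	/* as in the kernel */

	pgoff_t end = 16;			/* exclusive, as shmem_undo_range() tracks it */
	pgoff_t end_inclusive = end - 1;	/* inclusive, as find_get_entries() now expects */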
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -1046,7 +1046,7 @@ unsigned pagevec_lookup_entries(struct pagevec *pvec,
 		pgoff_t start, unsigned nr_entries,
 		pgoff_t *indices)
 {
-	pvec->nr = find_get_entries(mapping, start, nr_entries,
+	pvec->nr = find_get_entries(mapping, start, ULONG_MAX, nr_entries,
 			pvec->pages, indices);
 	return pagevec_count(pvec);
 }