/* pagecache_get_page */

/**
 * pagecache_get_page - find and get a page reference
 * @mapping: the address_space to search
 * @offset: the page index
 * @fgp_flags: PCG flags
 * @gfp_mask: gfp mask to use for the page cache data page allocation
 *
 * Looks up the page cache slot at @mapping & @offset.
 *
 * PCG flags modify how the page is returned.
 *
 * @fgp_flags can be:
 *
 * - FGP_ACCESSED: the page will be marked accessed
 * - FGP_LOCK: Page is returned locked
 * - FGP_CREAT: If page is not present then a new page is allocated using
 *   @gfp_mask and added to the page cache and the VM's LRU
 *   list. The page is returned locked and with an increased
 *   refcount.
 * - FGP_FOR_MMAP: Similar to FGP_CREAT, only we want to allow the caller to do
 *   its own locking dance if the page is already in cache, or unlock the page
 *   before returning if we had to add the page to pagecache.
 *
 * If FGP_LOCK or FGP_CREAT are specified then the function may sleep even
 * if the GFP flags specified for FGP_CREAT are atomic.
 *
 * If there is a page cache page, it is returned with an increased refcount.
 *
 * Return: the found page or %NULL otherwise.
 */
struct page *pagecache_get_page(struct address_space *mapping, pgoff_t offset,
    int fgp_flags, gfp_t gfp_mask)
{
    struct page *page;

repeat:
    page = find_get_entry(mapping, offset);
    /*
     * find_get_entry() may return a shadow/value entry (e.g. a swap or
     * workingset entry) rather than a real page; callers of this
     * function only want actual pages, so treat those as a miss.
     */
    if (xa_is_value(page))
        page = NULL;
    if (!page)
        goto no_page;

    if (fgp_flags & FGP_LOCK) {
        if (fgp_flags & FGP_NOWAIT) {
            /* Caller refuses to sleep: give up (dropping our ref)
             * instead of blocking on the page lock. */
            if (!trylock_page(page)) {
                put_page(page);
                return NULL;
            }
        } else {
            lock_page(page);
        }

        /* Has the page been truncated? */
        /*
         * We found the page and took a reference before locking it,
         * so truncation may have raced with us and cleared ->mapping.
         * If so, drop everything and redo the lookup from scratch.
         */
        if (unlikely(compound_head(page)->mapping != mapping)) {
            unlock_page(page);
            put_page(page);
            goto repeat;
        }
        /* Under the page lock the index is stable; it must match. */
        VM_BUG_ON_PAGE(page->index != offset, page);
    }

    if (fgp_flags & FGP_ACCESSED)
        mark_page_accessed(page);

no_page:
    /* Cache miss: optionally allocate and insert a new page. */
    if (!page && (fgp_flags & FGP_CREAT)) {
        int err;
        /* Account the allocation against dirty limits if the caller
         * intends to write and the mapping tracks dirty accounting. */
        if ((fgp_flags & FGP_WRITE) && mapping_cap_account_dirty(mapping))
            gfp_mask |= __GFP_WRITE;
        /* Caller may be in a filesystem context: avoid FS reentry. */
        if (fgp_flags & FGP_NOFS)
            gfp_mask &= ~__GFP_FS;

        page = __page_cache_alloc(gfp_mask);
        if (!page)
            return NULL;

        /* Creation without FGP_LOCK or FGP_FOR_MMAP is a caller bug;
         * warn once and fall back to returning the page locked. */
        if (WARN_ON_ONCE(!(fgp_flags & (FGP_LOCK | FGP_FOR_MMAP))))
            fgp_flags |= FGP_LOCK;

        /* Init accessed so avoid atomic mark_page_accessed later */
        if (fgp_flags & FGP_ACCESSED)
            __SetPageReferenced(page);

        err = add_to_page_cache_lru(page, mapping, offset, gfp_mask);
        if (unlikely(err)) {
            put_page(page);
            page = NULL;
            /* -EEXIST means someone else inserted a page at this
             * index first: retry the lookup to find theirs. */
            if (err == -EEXIST)
                goto repeat;
        }

        /*
         * add_to_page_cache_lru locks the page, and for mmap we expect
         * an unlocked page.
         */
        if (page && (fgp_flags & FGP_FOR_MMAP))
            unlock_page(page);
    }

    return page;
}

/* You may also be interested in: (linux, driver, func, linux) */